Merge branch 'late/dt' into next/dt2
This is support for the ARM Chromebook, originally scheduled as a "late" pull request. Since it's already late now, we can combine this into the existing next/dt2 branch.

* late/dt:
  ARM: exynos: dts: cros5250: add EC device
  ARM: dts: Add sbs-battery for exynos5250-snow
  ARM: dts: Add i2c-arbitrator bus for exynos5250-snow
  ARM: dts: Add chip-id controller node on Exynos4/5 SoC
  ARM: EXYNOS: Create virtual I/O mapping for Chip-ID controller using device tree
commit 4183bef2e0
422 changed files with 4485 additions and 2533 deletions
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
 	}
 }
 
-static inline bool atapi_command_packet_set(const u16 *dev_id)
+static inline int atapi_command_packet_set(const u16 *dev_id)
 {
 	return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
 }
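For context: bits 8..12 of IDENTIFY word 0 encode the ATAPI command packet set, a 5-bit device-type code (0..31), not a flag, so an int return preserves the value while a bool collapses every non-zero type to 1 and makes equality checks against a specific type misfire. A minimal user-space sketch (not kernel code; the 0x05 device type and the names below are only illustrative):

/* Illustrative only: why the return type matters. */
#include <stdio.h>

typedef unsigned short u16;
#define ATA_ID_CONFIG 0	/* word 0 of the IDENTIFY data, as in the hunk above */

static inline int atapi_command_packet_set(const u16 *dev_id)
{
	return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
}

int main(void)
{
	u16 id[256] = { [ATA_ID_CONFIG] = 0x05 << 8 };	/* device type 0x05 in bits 8..12 */

	/* int version prints 5, so a comparison against e.g. 0x01 correctly fails;
	 * a bool version would have returned 1 and matched 0x01 by accident. */
	printf("packet set = %d\n", atapi_command_packet_set(id));
	return 0;
}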
@@ -35,6 +35,7 @@ struct cpu_vfs_cap_data {
 #define _KERNEL_CAP_T_SIZE     (sizeof(kernel_cap_t))
 
 
+struct file;
 struct inode;
 struct dentry;
 struct user_namespace;

@@ -211,6 +212,7 @@ extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool nsown_capable(int cap);
 extern bool inode_capable(const struct inode *inode, int cap);
+extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
@@ -141,11 +141,11 @@ typedef struct {
 } compat_sigset_t;
 
 struct compat_sigaction {
-#ifndef __ARCH_HAS_ODD_SIGACTION
+#ifndef __ARCH_HAS_IRIX_SIGACTION
 	compat_uptr_t			sa_handler;
 	compat_ulong_t			sa_flags;
 #else
-	compat_ulong_t			sa_flags;
+	compat_uint_t			sa_flags;
 	compat_uptr_t			sa_handler;
 #endif
 #ifdef __ARCH_HAS_SA_RESTORER
@@ -213,7 +213,7 @@ struct devfreq_simple_ondemand_data {
 #endif
 
 #else /* !CONFIG_PM_DEVFREQ */
-static struct devfreq *devfreq_add_device(struct device *dev,
+static inline struct devfreq *devfreq_add_device(struct device *dev,
 					  struct devfreq_dev_profile *profile,
 					  const char *governor_name,
 					  void *data)

@@ -221,34 +221,34 @@ static struct devfreq *devfreq_add_device(struct device *dev,
 	return NULL;
 }
 
-static int devfreq_remove_device(struct devfreq *devfreq)
+static inline int devfreq_remove_device(struct devfreq *devfreq)
 {
 	return 0;
 }
 
-static int devfreq_suspend_device(struct devfreq *devfreq)
+static inline int devfreq_suspend_device(struct devfreq *devfreq)
 {
 	return 0;
 }
 
-static int devfreq_resume_device(struct devfreq *devfreq)
+static inline int devfreq_resume_device(struct devfreq *devfreq)
 {
 	return 0;
 }
 
-static struct opp *devfreq_recommended_opp(struct device *dev,
+static inline struct opp *devfreq_recommended_opp(struct device *dev,
 					   unsigned long *freq, u32 flags)
 {
-	return -EINVAL;
+	return ERR_PTR(-EINVAL);
 }
 
-static int devfreq_register_opp_notifier(struct device *dev,
+static inline int devfreq_register_opp_notifier(struct device *dev,
 					 struct devfreq *devfreq)
 {
 	return -EINVAL;
 }
 
-static int devfreq_unregister_opp_notifier(struct device *dev,
+static inline int devfreq_unregister_opp_notifier(struct device *dev,
 					   struct devfreq *devfreq)
 {
 	return -EINVAL;
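The two fixes in these stubs are worth spelling out: without inline, every file that includes the header with CONFIG_PM_DEVFREQ=n gets its own unused static definition, and devfreq_recommended_opp() returns a pointer, so its error must be encoded with ERR_PTR() rather than a plain negative integer. A sketch of the calling convention the stub now satisfies (illustrative only, not taken from a real driver; only devfreq_recommended_opp() from above plus the standard IS_ERR()/PTR_ERR() helpers are assumed):

/* Illustrative caller: works identically whether the real function or the
 * stub is in effect, because both now return ERR_PTR()-encoded errors. */
static int example_set_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct opp *opp;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))		/* only valid because the stub uses ERR_PTR() */
		return PTR_ERR(opp);	/* a raw -EINVAL cast to a pointer would break this */

	/* ... program the new frequency/voltage from *freq here ... */
	return 0;
}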
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            that the call back has its own recursion protection. If it does
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
+ * STUB   - The ftrace_ops is just a place holder.
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,

@@ -98,6 +99,7 @@ enum {
 	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
 	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
+	FTRACE_OPS_FL_STUB			= 1 << 7,
 };
 
 struct ftrace_ops {

@@ -394,7 +396,6 @@ ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
 			    size_t cnt, loff_t *ppos);
 ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
 			     size_t cnt, loff_t *ppos);
-loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int whence);
 int ftrace_regex_release(struct inode *inode, struct file *file);
 
 void __init

@@ -567,6 +568,8 @@ static inline int
 ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
+loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);
+
 /* totally disable ftrace - can not re-enable after this */
 void ftrace_kill(void);
 
@@ -518,7 +518,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
 			   void *data, unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
-			      gpa_t gpa);
+			      gpa_t gpa, unsigned long len);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
@@ -71,6 +71,7 @@ struct gfn_to_hva_cache {
 	u64 generation;
 	gpa_t gpa;
 	unsigned long hva;
+	unsigned long len;
 	struct kvm_memory_slot *memslot;
 };
 
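Taken together, the two kvm hunks let a cached guest-physical mapping describe a byte range rather than a single point, so cached writes can be validated against the region that was set up. A hedged sketch of the calling pattern, using only the two prototypes shown above (struct example_shared_page and example_publish() are invented for illustration):

/* Illustrative only: initialize a cached gpa->hva translation once, then
 * reuse it for cheap repeated writes on a hot path. */
struct example_shared_page {	/* hypothetical guest-visible structure */
	u64 value;
};

static int example_publish(struct kvm *kvm, gpa_t gpa)
{
	static struct gfn_to_hva_cache ghc;
	struct example_shared_page data = { 0 };
	int ret;

	/* The new 'len' argument lets the init routine validate the whole
	 * byte range, not just the page containing 'gpa'. */
	ret = kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(data));
	if (ret)
		return ret;

	return kvm_write_guest_cached(kvm, &ghc, &data, sizeof(data));
}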
@@ -398,6 +398,7 @@ enum {
 	ATA_HORKAGE_NOSETXFER		= (1 << 14),	/* skip SETXFER, SATA only */
 	ATA_HORKAGE_BROKEN_FPDMA_AA	= (1 << 15),	/* skip AA */
 	ATA_HORKAGE_DUMP_ID		= (1 << 16),	/* dump IDENTIFY data */
+	ATA_HORKAGE_MAX_SEC_LBA48	= (1 << 17),	/* Set max sects to 65535 */
 
 	 /* DMA mask for user DMA control: User visible values; DO NOT
 	    renumber */
@@ -210,9 +210,9 @@ struct netdev_hw_addr {
 #define NETDEV_HW_ADDR_T_SLAVE		3
 #define NETDEV_HW_ADDR_T_UNICAST	4
 #define NETDEV_HW_ADDR_T_MULTICAST	5
-	bool			synced;
 	bool			global_use;
 	int			refcount;
+	int			synced;
 	struct rcu_head		rcu_head;
 };
 

@@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
  *
  * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
  * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- *			     struct net_device *dev)
+ *			     struct net_device *dev, u32 filter_mask)
  *
  * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
  *	Called to change device carrier. Soft-devices (like dummy, team, etc)
@@ -916,6 +916,7 @@ void pci_disable_rom(struct pci_dev *pdev);
 void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
 void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
 size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
+void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
 
 /* Power management related routines */
 int pci_save_state(struct pci_dev *dev);
@@ -93,14 +93,20 @@ do { \
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
-#define preempt_disable()			do { } while (0)
-#define sched_preempt_enable_no_resched()	do { } while (0)
-#define preempt_enable_no_resched()		do { } while (0)
-#define preempt_enable()			do { } while (0)
+/*
+ * Even if we don't have any preemption, we need preempt disable/enable
+ * to be barriers, so that we don't have things like get_user/put_user
+ * that can cause faults and scheduling migrate into our preempt-protected
+ * region.
+ */
+#define preempt_disable()			barrier()
+#define sched_preempt_enable_no_resched()	barrier()
+#define preempt_enable_no_resched()		barrier()
+#define preempt_enable()			barrier()
 
-#define preempt_disable_notrace()		do { } while (0)
-#define preempt_enable_no_resched_notrace()	do { } while (0)
-#define preempt_enable_notrace()		do { } while (0)
+#define preempt_disable_notrace()		barrier()
+#define preempt_enable_no_resched_notrace()	barrier()
+#define preempt_enable_notrace()		barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
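The comment in the hunk above is the whole argument: on !CONFIG_PREEMPT_COUNT kernels these macros used to expand to nothing, so the compiler was free to move memory accesses, including ones that may fault and reschedule such as get_user(), across the supposedly protected region. A compiler barrier generates no code but pins that ordering. A hedged user-space analogue of the hazard (the barrier() definition mirrors the kernel's trick; example_read() and this_cpu_counter are invented):

/* Illustrative only: an empty asm with a "memory" clobber is a pure
 * compiler barrier; no memory access may be moved across it. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int this_cpu_counter;	/* stand-in for per-CPU state */

int example_read(volatile int *user_ptr)
{
	int val;

	barrier();	/* previously: preempt_disable() expanding to nothing */
	/* Without the barriers the compiler could move the this_cpu_counter
	 * access outside the region bracketing the (potentially faulting)
	 * *user_ptr load, defeating the point of the critical section. */
	val = *user_ptr + this_cpu_counter;
	barrier();	/* previously: preempt_enable() */

	return val;
}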
@@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
 					const struct file_operations *proc_fops,
 					void *data);
 extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
+extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent);
 
 struct pid_namespace;
 

@@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name,
 	return NULL;
 }
 #define remove_proc_entry(name, parent) do {} while (0)
+#define remove_proc_subtree(name, parent) do {} while (0)
 
 static inline struct proc_dir_entry *proc_symlink(const char *name,
 		struct proc_dir_entry *parent, const char *dest) {return NULL;}
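remove_proc_subtree() tears down a procfs directory together with everything registered under it, so callers no longer have to remove each entry individually in reverse order. A hedged sketch of the pattern it enables ("exampledrv" and example_fops are placeholders; proc_mkdir() plus the prototypes above are the only procfs calls assumed):

/* Illustrative driver setup/teardown using the new helper. */
static const struct file_operations example_fops;	/* placeholder fops */
static struct proc_dir_entry *exampledrv_dir;

static int exampledrv_proc_init(void)
{
	exampledrv_dir = proc_mkdir("exampledrv", NULL);
	if (!exampledrv_dir)
		return -ENOMEM;

	if (!proc_create_data("status", 0444, exampledrv_dir,
			      &example_fops, NULL)) {
		remove_proc_subtree("exampledrv", NULL);	/* removes dir and children */
		return -ENOMEM;
	}
	return 0;
}

static void exampledrv_proc_exit(void)
{
	/* One call instead of one remove_proc_entry() per file, deepest first. */
	remove_proc_subtree("exampledrv", NULL);
}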
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *	This hook can be used by the module to update any security state
  *	associated with the TUN device's security structure.
  *	@security pointer to the TUN devices's security structure.
+ * @skb_owned_by:
+ *	This hook sets the packet's owning sock.
+ *	@skb is the packet.
+ *	@sk the sock which owns the packet.
  *
  * Security hooks for XFRM operations.
  *

@@ -1638,6 +1642,7 @@ struct security_operations {
 	int (*tun_dev_attach_queue) (void *security);
 	int (*tun_dev_attach) (struct sock *sk, void *security);
 	int (*tun_dev_open) (void *security);
+	void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
 #endif	/* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM

@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);
 int security_tun_dev_attach(struct sock *sk, void *security);
 int security_tun_dev_open(void *security);
 
+void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
+
 #else /* CONFIG_SECURITY_NETWORK */
 static inline int security_unix_stream_connect(struct sock *sock,
 					       struct sock *other,

@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)
 {
 	return 0;
 }
+
+static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+}
+
 #endif	/* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
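Per the hook documentation added above, skb_owned_by lets code that builds a packet on behalf of a socket tell the LSM which sock will own it, so the security module can label the skb accordingly before it is queued. A hedged sketch of the calling side, using only the security_skb_owned_by() prototype from this hunk (the surrounding function is invented):

/* Illustrative only: a driver queues a locally built skb to a particular
 * socket and lets the LSM attach that socket's label first. */
static void example_queue_to_sock(struct sk_buff *skb, struct sock *sk)
{
	security_skb_owned_by(skb, sk);		/* no-op unless an LSM implements the hook */
	skb_queue_tail(&sk->sk_receive_queue, skb);
}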
@@ -250,11 +250,11 @@ extern int show_unhandled_signals;
 extern int sigsuspend(sigset_t *);
 
 struct sigaction {
-#ifndef __ARCH_HAS_ODD_SIGACTION
+#ifndef __ARCH_HAS_IRIX_SIGACTION
 	__sighandler_t	sa_handler;
 	unsigned long	sa_flags;
 #else
-	unsigned long	sa_flags;
+	unsigned int	sa_flags;
 	__sighandler_t	sa_handler;
 #endif
 #ifdef __ARCH_HAS_SA_RESTORER
@@ -2643,6 +2643,13 @@ static inline void nf_reset(struct sk_buff *skb)
 #endif
 }
 
+static inline void nf_reset_trace(struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+	skb->nf_trace = 0;
+#endif
+}
+
 /* Note: This doesn't put any conntrack and bridge info in dst. */
 static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 {
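Splitting nf_reset_trace() out of nf_reset() means the xtables TRACE flag is no longer cleared as a side effect of the general netfilter-state reset; callers that do want the flag dropped, typically where a packet is handed into another network namespace, now have to say so explicitly. A hedged sketch of that call-site pattern (the surrounding function is invented; only nf_reset() and nf_reset_trace() from this header are assumed):

/* Illustrative only: hand a packet over to a device in another netns. */
static void example_cross_netns_forward(struct sk_buff *skb)
{
	nf_reset(skb);		/* drop conntrack/bridge state from the old netns */
	nf_reset_trace(skb);	/* explicitly drop the TRACE mark as well */

	/* ... set skb->dev to the target device and transmit ... */
}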
@@ -16,7 +16,10 @@
  * In the debug case, 1 means unlocked, 0 means locked. (the values
  * are inverted, to catch initialization bugs)
  *
- * No atomicity anywhere, we are on UP.
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK

@@ -25,6 +28,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	lock->slock = 0;
+	barrier();
 }
 
 static inline void

@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
 	local_irq_save(flags);
 	lock->slock = 0;
+	barrier();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)

@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	char oldval = lock->slock;
 
 	lock->slock = 0;
+	barrier();
 
 	return oldval > 0;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+	barrier();
 	lock->slock = 1;
 }
 
 /*
  * Read-write spinlocks. No debug version.
  */
-#define arch_read_lock(lock)		do { (void)(lock); } while (0)
-#define arch_write_lock(lock)		do { (void)(lock); } while (0)
-#define arch_read_trylock(lock)	({ (void)(lock); 1; })
-#define arch_write_trylock(lock)	({ (void)(lock); 1; })
-#define arch_read_unlock(lock)		do { (void)(lock); } while (0)
-#define arch_write_unlock(lock)	do { (void)(lock); } while (0)
+#define arch_read_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock)	({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock)		do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock)	do { barrier(); (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)	((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock)		do { (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)	do { (void)(lock); } while (0)
-# define arch_spin_unlock(lock)	do { (void)(lock); } while (0)
-# define arch_spin_trylock(lock)	({ (void)(lock); 1; })
+# define arch_spin_lock(lock)		do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock)	do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock)	({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_contended(lock)	(((void)(lock), 0))
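Same reasoning as the preempt.h hunk: on a UP kernel these "locks" compile down to plain stores, so only a compiler barrier stops the optimizer from moving a user access (which can fault and sleep) outside the store pair that is supposed to bracket it. A compact sketch of the ordering the added barrier() calls enforce, with the lock type reduced to its essentials (illustrative only, not the kernel's actual build):

/* Illustrative only: a UP "spinlock" is just a byte plus compiler barriers. */
#define barrier() __asm__ __volatile__("" ::: "memory")

typedef struct { volatile unsigned char slock; } example_arch_spinlock_t;

static inline void example_spin_lock(example_arch_spinlock_t *lock)
{
	lock->slock = 0;
	barrier();	/* nothing after this may be hoisted above the locking store */
}

static inline void example_spin_unlock(example_arch_spinlock_t *lock)
{
	barrier();	/* nothing before this may be sunk below the unlocking store */
	lock->slock = 1;
}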