Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
commit e2a7c34fb2
199 changed files with 1218 additions and 655 deletions
include/linux/devpts_fs.h
@@ -19,7 +19,7 @@
 
 struct pts_fs_info;
 
-struct pts_fs_info *devpts_acquire(struct file *);
+struct pts_fs_info *devpts_acquire(struct file *, struct vfsmount **ptsmnt);
 void devpts_release(struct pts_fs_info *);
 
 int devpts_new_index(struct pts_fs_info *);
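This hunk widens devpts_acquire() so it can hand the devpts vfsmount back to the caller, letting the caller keep that mount pinned while it works with the pts superblock. A hedged sketch of the resulting calling convention follows; the helper name and error handling are illustrative, and the mntput() pairing is an assumption based on the reference being returned:

/* sketch: resolve the devpts instance behind a ptmx file, keeping its
 * mount pinned until we are done with the superblock */
static int peer_open_example(struct file *ptmx_filp)
{
	struct vfsmount *ptsmnt;
	struct pts_fs_info *fsi;

	fsi = devpts_acquire(ptmx_filp, &ptsmnt);
	if (IS_ERR(fsi))
		return PTR_ERR(fsi);

	/* ... look up and open the peer pty here ... */

	devpts_release(fsi);
	mntput(ptsmnt);		/* drop the reference handed back above */
	return 0;
}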
include/linux/memblock.h
@@ -61,6 +61,7 @@ extern int memblock_debug;
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
 #define __init_memblock __meminit
 #define __initdata_memblock __meminitdata
+void memblock_discard(void);
 #else
 #define __init_memblock
 #define __initdata_memblock
@@ -74,8 +75,6 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
 					int nid, ulong flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
-phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
-phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
@@ -110,6 +109,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
 				phys_addr_t *out_end);
 
+void __memblock_free_early(phys_addr_t base, phys_addr_t size);
+void __memblock_free_late(phys_addr_t base, phys_addr_t size);
+
 /**
  * for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
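memblock is the early-boot physical memory allocator; with CONFIG_ARCH_DISCARD_MEMBLOCK its metadata (and anything annotated __init_memblock) is thrown away once boot finishes, and the new memblock_discard() hook makes that discard explicit. For orientation, a minimal sketch of the unchanged registration API, with hypothetical addresses and sizes:

#include <linux/init.h>
#include <linux/memblock.h>

/* hedged sketch: early-boot registration of one RAM bank plus a
 * firmware carve-out; the addresses and sizes are hypothetical */
void __init register_ram_example(void)
{
	memblock_add(0x80000000, 0x20000000);	/* 512 MiB of RAM */
	memblock_reserve(0x80000000, 0x100000);	/* 1 MiB kept for firmware */
}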
include/linux/memcontrol.h
@@ -484,7 +484,8 @@ bool mem_cgroup_oom_synchronize(bool wait);
 extern int do_swap_account;
 #endif
 
-void lock_page_memcg(struct page *page);
+struct mem_cgroup *lock_page_memcg(struct page *page);
+void __unlock_page_memcg(struct mem_cgroup *memcg);
 void unlock_page_memcg(struct page *page);
 
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
@@ -809,7 +810,12 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void lock_page_memcg(struct page *page)
+static inline struct mem_cgroup *lock_page_memcg(struct page *page)
+{
+	return NULL;
+}
+
+static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
 {
 }
 
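lock_page_memcg() now returns the memcg it locked so callers can unlock through __unlock_page_memcg() even if the page's memcg binding changes underneath them. A sketch of the intended pairing; the function name and the work in the middle are placeholders:

/* sketch: pin the page's memcg, update per-memcg state, then unlock via
 * the returned pointer; __unlock_page_memcg(NULL) is a no-op, matching
 * the !CONFIG_MEMCG stub that returns NULL */
static void update_page_state_example(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	/* ... read or modify per-memcg page state ... */
	__unlock_page_memcg(memcg);
}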
include/linux/nmi.h
@@ -168,6 +168,14 @@ extern int sysctl_hardlockup_all_cpu_backtrace;
 #define sysctl_softlockup_all_cpu_backtrace 0
 #define sysctl_hardlockup_all_cpu_backtrace 0
 #endif
+
+#if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \
+    defined(CONFIG_HARDLOCKUP_DETECTOR)
+void watchdog_update_hrtimer_threshold(u64 period);
+#else
+static inline void watchdog_update_hrtimer_threshold(u64 period) { }
+#endif
+
 extern bool is_hardlockup(void);
 struct ctl_table;
 extern int proc_watchdog(struct ctl_table *, int ,
include/linux/oom.h
@@ -6,6 +6,8 @@
 #include <linux/types.h>
 #include <linux/nodemask.h>
 #include <uapi/linux/oom.h>
+#include <linux/sched/coredump.h> /* MMF_* */
+#include <linux/mm.h> /* VM_FAULT* */
 
 struct zonelist;
 struct notifier_block;
@@ -63,6 +65,26 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
 	return tsk->signal->oom_mm;
 }
 
+/*
+ * Checks whether a page fault on the given mm is still reliable.
+ * This is no longer true if the oom reaper started to reap the
+ * address space which is reflected by MMF_UNSTABLE flag set in
+ * the mm. At that moment any !shared mapping would lose the content
+ * and could cause a memory corruption (zero pages instead of the
+ * original content).
+ *
+ * User should call this before establishing a page table entry for
+ * a !shared mapping and under the proper page table lock.
+ *
+ * Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise.
+ */
+static inline int check_stable_address_space(struct mm_struct *mm)
+{
+	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
+		return VM_FAULT_SIGBUS;
+	return 0;
+}
+
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
 		unsigned long totalpages);
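The new helper's comment states the contract: call it under the page table lock before installing a PTE for a private mapping. A hedged sketch of a fault path honoring that contract; the function and surrounding locking follow the usual fault-handler shape rather than any line of this diff:

/* sketch: refuse to install a PTE into a private mapping once the OOM
 * reaper may have zapped this address space (MMF_UNSTABLE set) */
static int install_private_pte_example(struct vm_fault *vmf)
{
	int ret;

	spin_lock(vmf->ptl);			/* proper page table lock */
	ret = check_stable_address_space(vmf->vma->vm_mm);
	if (ret) {				/* VM_FAULT_SIGBUS */
		spin_unlock(vmf->ptl);
		return ret;
	}
	/* ... safe to set_pte_at() here ... */
	spin_unlock(vmf->ptl);
	return 0;
}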
include/linux/perf_event.h
@@ -310,8 +310,8 @@ struct pmu {
 	 * Notification that the event was mapped or unmapped.  Called
 	 * in the context of the mapping task.
 	 */
-	void (*event_mapped)		(struct perf_event *event); /*optional*/
-	void (*event_unmapped)		(struct perf_event *event); /*optional*/
+	void (*event_mapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
+	void (*event_unmapped)		(struct perf_event *event, struct mm_struct *mm); /* optional */
 
 	/*
 	 * Flags for ->add()/->del()/ ->start()/->stop(). There are
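The mm is now passed explicitly because these callbacks can fire from teardown paths where current->mm is already NULL. A hypothetical callback using the new argument; the pmu and the printout are illustrative only:

/* hypothetical pmu callback: the mm comes in as an argument instead of
 * being read from current, so it is still valid when the last mapping
 * goes away during process exit */
static void example_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
	pr_debug("pmu event mapped; mm has %d users\n",
		 atomic_read(&mm->mm_users));
}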
include/linux/pid.h
@@ -8,7 +8,9 @@ enum pid_type
 	PIDTYPE_PID,
 	PIDTYPE_PGID,
 	PIDTYPE_SID,
-	PIDTYPE_MAX
+	PIDTYPE_MAX,
+	/* only valid to __task_pid_nr_ns() */
+	__PIDTYPE_TGID
 };
 
 /*
include/linux/ptr_ring.h
@@ -436,9 +436,9 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
 	__PTR_RING_PEEK_CALL_v; \
 })
 
-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
 {
-	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+	return kcalloc(size, sizeof(void *), gfp);
 }
 
 static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -582,7 +582,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
  * In particular if you consume ring in interrupt or BH context, you must
  * disable interrupts/BH when doing so.
  */
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+					   unsigned int nrings,
 					   int size,
 					   gfp_t gfp, void (*destroy)(void *))
 {
@@ -590,7 +591,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
 	void ***queues;
 	int i;
 
-	queues = kmalloc(nrings * sizeof *queues, gfp);
+	queues = kmalloc_array(nrings, sizeof(*queues), gfp);
 	if (!queues)
 		goto noqueues;
 
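The allocator switches are about overflow safety: kcalloc() and kmalloc_array() fail cleanly when a count times element size would overflow, where the old open-coded multiplications could wrap into a too-small allocation. A short illustration, with hypothetical helper names:

#include <linux/slab.h>

/* both helpers return NULL if n * sizeof(void *) would overflow, where
 * an open-coded kmalloc(n * sizeof(void *), ...) silently wraps */
static void **alloc_ring_slots_example(unsigned int n)
{
	return kcalloc(n, sizeof(void *), GFP_KERNEL);		/* zeroed */
}

static void **alloc_queue_table_example(unsigned int n)
{
	return kmalloc_array(n, sizeof(void *), GFP_KERNEL);	/* not zeroed */
}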
include/linux/sched.h
@@ -1163,13 +1163,6 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
 	return tsk->tgid;
 }
 
-extern pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-
-static inline pid_t task_tgid_vnr(struct task_struct *tsk)
-{
-	return pid_vnr(task_tgid(tsk));
-}
-
 /**
  * pid_alive - check that a task structure is not stale
  * @p: Task structure to be checked.
@@ -1185,23 +1178,6 @@ static inline int pid_alive(const struct task_struct *p)
 	return p->pids[PIDTYPE_PID].pid != NULL;
 }
 
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
-	pid_t pid = 0;
-
-	rcu_read_lock();
-	if (pid_alive(tsk))
-		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
-	rcu_read_unlock();
-
-	return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
-	return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
-
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
@@ -1223,6 +1199,33 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
 }
 
+static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+{
+	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+}
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+	return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+}
+
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+	pid_t pid = 0;
+
+	rcu_read_lock();
+	if (pid_alive(tsk))
+		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+	rcu_read_unlock();
+
+	return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+	return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
 /* Obsolete, do not use: */
 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
 {
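Net effect of the three sched.h hunks: every TGID helper now funnels through __task_pid_nr_ns() with the pseudo-type __PIDTYPE_TGID, so the group-leader dereference happens in one place under rcu_read_lock() instead of in racy open-coded copies. Callers keep the same API; a small usage sketch with a hypothetical caller:

/* sketch: report the calling task's thread-group id as seen from its
 * own pid namespace; the lookup now resolves the group leader under
 * RCU inside __task_pid_nr_ns() */
static void report_tgid_example(void)
{
	pr_info("tgid in current namespace: %d\n", task_tgid_vnr(current));
}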
include/linux/skb_array.h
@@ -193,7 +193,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
 }
 
 static inline int skb_array_resize_multiple(struct skb_array **rings,
-					    int nrings, int size, gfp_t gfp)
+					    int nrings, unsigned int size,
+					    gfp_t gfp)
 {
 	BUILD_BUG_ON(offsetof(struct skb_array, ring));
 	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
include/linux/wait.h
@@ -757,6 +757,43 @@ extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
 	__ret;								\
 })
 
+#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
+	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
+		      TASK_KILLABLE, 0, timeout,			\
+		      __ret = schedule_timeout(__ret))
+
+/**
+ * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq_head: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_KILLABLE) until the
+ * @condition evaluates to true or a kill signal is received.
+ * The @condition is checked each time the waitqueue @wq_head is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * Returns:
+ * 0 if the @condition evaluated to %false after the @timeout elapsed,
+ * 1 if the @condition evaluated to %true after the @timeout elapsed,
+ * the remaining jiffies (at least 1) if the @condition evaluated
+ * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
+ * interrupted by a kill signal.
+ *
+ * Only kill signals interrupt this process.
+ */
+#define wait_event_killable_timeout(wq_head, condition, timeout)	\
+({									\
+	long __ret = timeout;						\
+	might_sleep();							\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __wait_event_killable_timeout(wq_head,		\
+						condition, timeout);	\
+	__ret;								\
+})
+
 
 #define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
 	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
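The kernel-doc above fixes the return contract, which the sketch below exercises; the waitqueue, flag, and five-second timeout are hypothetical:

static DECLARE_WAIT_QUEUE_HEAD(example_wq);	/* hypothetical queue */
static bool done;	/* set by the producer before wake_up(&example_wq) */

/* sketch: sleep until `done` turns true, for at most 5 seconds,
 * waking early only for a fatal (kill) signal */
static int wait_for_done_example(void)
{
	long ret = wait_event_killable_timeout(example_wq, done, 5 * HZ);

	if (ret == -ERESTARTSYS)
		return -EINTR;		/* interrupted by a kill signal */
	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed, condition still false */
	return 0;			/* condition true; ret is the jiffies left */
}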