Merge branch 'akpm' (incoming from Andrew)
Merge second patch-bomb from Andrew Morton:
 - various misc bits
 - the rest of MM
 - add generic fixmap.h, use it
 - backlight updates
 - dynamic_debug updates
 - printk() updates
 - checkpatch updates
 - binfmt_elf
 - ramfs
 - init/
 - autofs4
 - drivers/rtc
 - nilfs
 - hfsplus
 - Documentation/
 - coredump
 - procfs
 - fork
 - exec
 - kexec
 - kdump
 - partitions
 - rapidio
 - rbtree
 - userns
 - memstick
 - w1
 - decompressors

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (197 commits)
  lib/decompress_unlz4.c: always set an error return code on failures
  romfs: fix returm err while getting inode in fill_super
  drivers/w1/masters/w1-gpio.c: add strong pullup emulation
  drivers/memstick/host/rtsx_pci_ms.c: fix ms card data transfer bug
  userns: relax the posix_acl_valid() checks
  arch/sh/kernel/dwarf.c: use rbtree postorder iteration helper instead of solution using repeated rb_erase()
  fs-ext3-use-rbtree-postorder-iteration-helper-instead-of-opencoding-fix
  fs/ext3: use rbtree postorder iteration helper instead of opencoding
  fs/jffs2: use rbtree postorder iteration helper instead of opencoding
  fs/ext4: use rbtree postorder iteration helper instead of opencoding
  fs/ubifs: use rbtree postorder iteration helper instead of opencoding
  net/netfilter/ipset/ip_set_hash_netiface.c: use rbtree postorder iteration instead of opencoding
  rbtree/test: test rbtree_postorder_for_each_entry_safe()
  rbtree/test: move rb_node to the middle of the test struct
  rapidio: add modular rapidio core build into powerpc and mips branches
  partitions/efi: complete documentation of gpt kernel param purpose
  kdump: add /sys/kernel/vmcoreinfo ABI documentation
  kdump: fix exported size of vmcoreinfo note
  kexec: add sysctl to disable kexec_load
  fs/exec.c: call arch_pick_mmap_layout() only once
  ...
commit 3aacd625f2
231 changed files with 3339 additions and 2187 deletions
include/linux/cache.h
@@ -1,11 +1,11 @@
 #ifndef __LINUX_CACHE_H
 #define __LINUX_CACHE_H
 
-#include <linux/kernel.h>
+#include <uapi/linux/kernel.h>
 #include <asm/cache.h>
 
 #ifndef L1_CACHE_ALIGN
-#define L1_CACHE_ALIGN(x) ALIGN(x, L1_CACHE_BYTES)
+#define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
 #endif
 
 #ifndef SMP_CACHE_BYTES
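Editor's note: switching from ALIGN() to __ALIGN_KERNEL() lets cache.h take the macro from the exported uapi header instead of the much heavier linux/kernel.h. Both expand to the same round-up arithmetic; a minimal userspace sketch of that arithmetic (ALIGN_UP is an illustrative name, not the kernel's):

#include <stdio.h>

/* Same mask trick as __ALIGN_KERNEL: add (a - 1), then clear the low bits.
 * Assumes 'a' is a power of two, as cache line sizes are. */
#define ALIGN_UP(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

int main(void)
{
	printf("%lu %lu %lu\n",
	       ALIGN_UP(1UL, 64), ALIGN_UP(64UL, 64), ALIGN_UP(65UL, 64));
	/* prints: 64 64 128 */
	return 0;
}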
include/linux/ceph/decode.h
@@ -8,23 +8,6 @@
 
 #include <linux/ceph/types.h>
 
-/* This seemed to be the easiest place to define these */
-
-#define U8_MAX	((u8)(~0U))
-#define U16_MAX	((u16)(~0U))
-#define U32_MAX	((u32)(~0U))
-#define U64_MAX	((u64)(~0ULL))
-
-#define S8_MAX	((s8)(U8_MAX >> 1))
-#define S16_MAX	((s16)(U16_MAX >> 1))
-#define S32_MAX	((s32)(U32_MAX >> 1))
-#define S64_MAX	((s64)(U64_MAX >> 1LL))
-
-#define S8_MIN	((s8)(-S8_MAX - 1))
-#define S16_MIN	((s16)(-S16_MAX - 1))
-#define S32_MIN	((s32)(-S32_MAX - 1))
-#define S64_MIN	((s64)(-S64_MAX - 1LL))
-
 /*
  * in all cases,
  * void **p pointer to position pointer
include/linux/genalloc.h
@@ -30,6 +30,8 @@
 #ifndef __GENALLOC_H__
 #define __GENALLOC_H__
 
 #include <linux/spinlock_types.h>
 
+struct device;
+struct device_node;
 
include/linux/gfp.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_GFP_H
 #define __LINUX_GFP_H
 
+#include <linux/mmdebug.h>
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
include/linux/hugetlb.h
@@ -2,6 +2,7 @@
 #define _LINUX_HUGETLB_H
 
 #include <linux/mm_types.h>
+#include <linux/mmdebug.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
@@ -354,7 +355,7 @@ static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 
 static inline struct hstate *page_hstate(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 	return size_to_hstate(PAGE_SIZE << compound_order(page));
 }
 
include/linux/hugetlb_cgroup.h
@@ -15,6 +15,7 @@
 #ifndef _LINUX_HUGETLB_CGROUP_H
 #define _LINUX_HUGETLB_CGROUP_H
 
+#include <linux/mmdebug.h>
 #include <linux/res_counter.h>
 
 struct hugetlb_cgroup;
@@ -28,7 +29,7 @@ struct hugetlb_cgroup;
 
 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return NULL;
@@ -38,7 +39,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
 static inline
 int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
 {
-	VM_BUG_ON(!PageHuge(page));
+	VM_BUG_ON_PAGE(!PageHuge(page), page);
 
 	if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
 		return -1;
include/linux/kernel.h
@@ -29,6 +29,19 @@
 #define ULLONG_MAX	(~0ULL)
 #define SIZE_MAX	(~(size_t)0)
 
+#define U8_MAX		((u8)~0U)
+#define S8_MAX		((s8)(U8_MAX>>1))
+#define S8_MIN		((s8)(-S8_MAX - 1))
+#define U16_MAX		((u16)~0U)
+#define S16_MAX		((s16)(U16_MAX>>1))
+#define S16_MIN		((s16)(-S16_MAX - 1))
+#define U32_MAX		((u32)~0U)
+#define S32_MAX		((s32)(U32_MAX>>1))
+#define S32_MIN		((s32)(-S32_MAX - 1))
+#define U64_MAX		((u64)~0ULL)
+#define S64_MAX		((s64)(U64_MAX>>1))
+#define S64_MIN		((s64)(-S64_MAX - 1))
+
 #define STACK_MAGIC	0xdeadbeef
 
 #define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))
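Editor's note: these limits land in kernel.h as the canonical home (the ceph copies above are deleted in the same series). The shift-by-one trick works because U<N>_MAX >> 1 is evaluated after integer promotion; a small standalone check using stdint typedefs in place of the kernel's:

#include <stdio.h>
#include <stdint.h>

typedef uint8_t u8;
typedef int8_t s8;

#define U8_MAX	((u8)~0U)
#define S8_MAX	((s8)(U8_MAX >> 1))
#define S8_MIN	((s8)(-S8_MAX - 1))

int main(void)
{
	/* prints: U8_MAX=255 S8_MAX=127 S8_MIN=-128 */
	printf("U8_MAX=%u S8_MAX=%d S8_MIN=%d\n",
	       (unsigned)U8_MAX, (int)S8_MAX, (int)S8_MIN);
	return 0;
}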
include/linux/kexec.h
@@ -170,6 +170,7 @@ unsigned long paddr_vmcoreinfo_note(void);
 
 extern struct kimage *kexec_image;
 extern struct kimage *kexec_crash_image;
+extern int kexec_load_disabled;
 
 #ifndef kexec_flush_icache_page
 #define kexec_flush_icache_page(page)
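Editor's note: kexec_load_disabled backs the kernel.kexec_load_disabled sysctl named in the merge message ("kexec: add sysctl to disable kexec_load"): a one-way switch that, once set, makes further kexec_load(2) calls fail with EPERM until reboot. A minimal userspace probe of the knob, assuming procfs is mounted at /proc:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kexec_load_disabled", "r");
	int v = 0;

	if (!f) {
		perror("kexec_load_disabled");
		return 1;
	}
	if (fscanf(f, "%d", &v) == 1)
		printf("kexec_load(2): %s\n",
		       v ? "disabled until reboot" : "allowed");
	fclose(f);
	return 0;
}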
include/linux/memblock.h
@@ -61,6 +61,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
 phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
+phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
 void memblock_allow_resize(void);
 int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
 int memblock_add(phys_addr_t base, phys_addr_t size);
include/linux/memcontrol.h
@@ -497,10 +497,11 @@ void __memcg_kmem_commit_charge(struct page *page,
 void __memcg_kmem_uncharge_pages(struct page *page, int order);
 
 int memcg_cache_id(struct mem_cgroup *memcg);
-int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
-			 struct kmem_cache *root_cache);
-void memcg_release_cache(struct kmem_cache *cachep);
-void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
+int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
+			     struct kmem_cache *root_cache);
+void memcg_free_cache_params(struct kmem_cache *s);
+void memcg_register_cache(struct kmem_cache *s);
+void memcg_unregister_cache(struct kmem_cache *s);
 
 int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
 void memcg_update_array_size(int num_groups);
@@ -640,19 +641,21 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
 	return -1;
 }
 
-static inline int
-memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
-		     struct kmem_cache *root_cache)
+static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
+		struct kmem_cache *s, struct kmem_cache *root_cache)
 {
 	return 0;
 }
 
-static inline void memcg_release_cache(struct kmem_cache *cachep)
+static inline void memcg_free_cache_params(struct kmem_cache *s)
 {
 }
 
-static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
-					struct kmem_cache *s)
+static inline void memcg_register_cache(struct kmem_cache *s)
 {
 }
 
+static inline void memcg_unregister_cache(struct kmem_cache *s)
+{
+}
+
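Editor's note: the old register/release pair is split into a symmetric four-call API, which by its naming suggests that fallible parameter allocation is separated from registration, with teardown mirroring setup. A userspace stand-in sketching that assumed call order; the bodies are stubs, not the kernel's:

#include <stdio.h>

struct mem_cgroup;
struct kmem_cache { const char *name; };

static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{ printf("alloc params for %s\n", s->name); return 0; }

static void memcg_register_cache(struct kmem_cache *s)
{ printf("register %s\n", s->name); }

static void memcg_unregister_cache(struct kmem_cache *s)
{ printf("unregister %s\n", s->name); }

static void memcg_free_cache_params(struct kmem_cache *s)
{ printf("free params for %s\n", s->name); }

int main(void)
{
	struct kmem_cache c = { "demo" };

	/* creation path: allocation (can fail) before registration */
	if (memcg_alloc_cache_params(NULL, &c, NULL) == 0)
		memcg_register_cache(&c);
	/* destruction path mirrors it */
	memcg_unregister_cache(&c);
	memcg_free_cache_params(&c);
	return 0;
}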
include/linux/mm.h
@@ -5,6 +5,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/mmdebug.h>
 #include <linux/gfp.h>
 #include <linux/bug.h>
 #include <linux/list.h>
@@ -303,7 +304,7 @@ static inline int get_freepage_migratetype(struct page *page)
  */
 static inline int put_page_testzero(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -364,7 +365,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
 static inline void compound_lock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_lock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -372,7 +373,7 @@ static inline void compound_lock(struct page *page)
 static inline void compound_unlock(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(PageSlab(page));
+	VM_BUG_ON_PAGE(PageSlab(page), page);
 	bit_spin_unlock(PG_compound_lock, &page->flags);
 #endif
 }
@@ -447,7 +448,7 @@ static inline bool __compound_tail_refcounted(struct page *page)
  */
 static inline bool compound_tail_refcounted(struct page *page)
 {
-	VM_BUG_ON(!PageHead(page));
+	VM_BUG_ON_PAGE(!PageHead(page), page);
 	return __compound_tail_refcounted(page);
 }
 
@@ -456,9 +457,9 @@ static inline void get_huge_page_tail(struct page *page)
 	/*
 	 * __split_huge_page_refcount() cannot run from under us.
 	 */
-	VM_BUG_ON(!PageTail(page));
-	VM_BUG_ON(page_mapcount(page) < 0);
-	VM_BUG_ON(atomic_read(&page->_count) != 0);
+	VM_BUG_ON_PAGE(!PageTail(page), page);
+	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 	if (compound_tail_refcounted(page->first_page))
 		atomic_inc(&page->_mapcount);
 }
@@ -474,7 +475,7 @@ static inline void get_page(struct page *page)
 	 * Getting a normal page or the head of a compound page
 	 * requires to already have an elevated page->_count.
 	 */
-	VM_BUG_ON(atomic_read(&page->_count) <= 0);
+	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 	atomic_inc(&page->_count);
 }
 
@@ -511,13 +512,13 @@ static inline int PageBuddy(struct page *page)
 
 static inline void __SetPageBuddy(struct page *page)
 {
-	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
+	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
 {
-	VM_BUG_ON(!PageBuddy(page));
+	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 	atomic_set(&page->_mapcount, -1);
 }
 
@@ -1401,7 +1402,7 @@ static inline bool ptlock_init(struct page *page)
 	 * slab code uses page->slab_cache and page->first_page (for tail
 	 * pages), which share storage with page->ptl.
 	 */
-	VM_BUG_ON(*(unsigned long *)&page->ptl);
+	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
 	if (!ptlock_alloc(page))
 		return false;
 	spin_lock_init(ptlock_ptr(page));
@@ -1492,7 +1493,7 @@ static inline bool pgtable_pmd_page_ctor(struct page *page)
 static inline void pgtable_pmd_page_dtor(struct page *page)
 {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	VM_BUG_ON(page->pmd_huge_pte);
+	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
 #endif
 	ptlock_free(page);
 }
@@ -2029,8 +2030,6 @@ extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages;
 extern int soft_offline_page(struct page *page, int flags);
 
-extern void dump_page(struct page *page);
-
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
 extern void clear_huge_page(struct page *page,
 			    unsigned long addr,
include/linux/mmdebug.h
@@ -1,10 +1,19 @@
 #ifndef LINUX_MM_DEBUG_H
 #define LINUX_MM_DEBUG_H 1
 
+struct page;
+
+extern void dump_page(struct page *page, char *reason);
+extern void dump_page_badflags(struct page *page, char *reason,
+			       unsigned long badflags);
+
 #ifdef CONFIG_DEBUG_VM
 #define VM_BUG_ON(cond) BUG_ON(cond)
+#define VM_BUG_ON_PAGE(cond, page)					\
+	do { if (unlikely(cond)) { dump_page(page, NULL); BUG(); } } while (0)
 #else
 #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
 #endif
 
 #ifdef CONFIG_DEBUG_VIRTUAL
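Editor's note: VM_BUG_ON_PAGE() is the workhorse of this series. Instead of a bare BUG(), it dumps the offending struct page first, so the oops carries the page's flags and counts. A userspace model of the dump-then-die pattern (simplified types; the fields are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; int _count; };

static void dump_page(struct page *page, char *reason)
{
	fprintf(stderr, "page flags=%#lx count=%d reason=%s\n",
		page->flags, page->_count, reason ? reason : "(none)");
}

/* Same shape as the new macro: dump state first, then abort. */
#define VM_BUG_ON_PAGE(cond, page)					\
	do { if (cond) { dump_page(page, NULL); abort(); } } while (0)

int main(void)
{
	struct page p = { .flags = 0x4, ._count = 1 };

	VM_BUG_ON_PAGE(p._count == 0, &p);	/* condition false: no dump */
	puts("ok");
	return 0;
}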
include/linux/of.h
@@ -377,8 +377,13 @@ static inline bool of_have_populated_dt(void)
 	return false;
 }
 
+/* Kill an unused variable warning on a device_node pointer */
+static inline void __of_use_dn(const struct device_node *np)
+{
+}
+
 #define for_each_child_of_node(parent, child) \
-	while (0)
+	while (__of_use_dn(parent), __of_use_dn(child), 0)
 
 #define for_each_available_child_of_node(parent, child) \
 	while (0)
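Editor's note: the comma expression still evaluates to 0, so the loop body is never entered, but both arguments are now "used", silencing unused-variable warnings in !CONFIG_OF builds. The same trick, compilable standalone (names here are stand-ins):

#include <stdio.h>

struct device_node;

/* "Kill an unused variable warning on a device_node pointer" */
static inline void __use_dn(const struct device_node *np) { (void)np; }

#define for_each_child_stub(parent, child) \
	while (__use_dn(parent), __use_dn(child), 0)

int main(void)
{
	struct device_node *parent = NULL, *child = NULL;

	for_each_child_stub(parent, child)
		puts("never reached");
	puts("zero iterations, no unused-variable warning");
	return 0;
}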
include/linux/page-flags.h
@@ -412,7 +412,7 @@ static inline void ClearPageCompound(struct page *page)
  */
 static inline int PageTransHuge(struct page *page)
 {
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 	return PageHead(page);
 }
 
@@ -460,25 +460,25 @@ static inline int PageTransTail(struct page *page)
  */
 static inline int PageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	return PageActive(page);
 }
 
 static inline void SetPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	SetPageActive(page);
 }
 
 static inline void __ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	__ClearPageActive(page);
 }
 
 static inline void ClearPageSlabPfmemalloc(struct page *page)
 {
-	VM_BUG_ON(!PageSlab(page));
+	VM_BUG_ON_PAGE(!PageSlab(page), page);
 	ClearPageActive(page);
 }
 
include/linux/pagemap.h
@@ -162,7 +162,7 @@ static inline int page_cache_get_speculative(struct page *page)
 	 * disabling preempt, and hence no need for the "speculative get" that
 	 * SMP requires.
 	 */
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_inc(&page->_count);
 
 #else
@@ -175,7 +175,7 @@ static inline int page_cache_get_speculative(struct page *page)
 		return 0;
 	}
 #endif
-	VM_BUG_ON(PageTail(page));
+	VM_BUG_ON_PAGE(PageTail(page), page);
 
 	return 1;
 }
@@ -191,14 +191,14 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 # ifdef CONFIG_PREEMPT_COUNT
 	VM_BUG_ON(!in_atomic());
 # endif
-	VM_BUG_ON(page_count(page) == 0);
+	VM_BUG_ON_PAGE(page_count(page) == 0, page);
 	atomic_add(count, &page->_count);
 
 #else
 	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
 		return 0;
 #endif
-	VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);
 
 	return 1;
 }
@@ -210,7 +210,7 @@ static inline int page_freeze_refs(struct page *page, int count)
 
 static inline void page_unfreeze_refs(struct page *page, int count)
 {
-	VM_BUG_ON(page_count(page) != 0);
+	VM_BUG_ON_PAGE(page_count(page) != 0, page);
 	VM_BUG_ON(count == 0);
 
 	atomic_set(&page->_count, count);
include/linux/parser.h
@@ -29,5 +29,6 @@ int match_token(char *, const match_table_t table, substring_t args[]);
 int match_int(substring_t *, int *result);
 int match_octal(substring_t *, int *result);
 int match_hex(substring_t *, int *result);
+bool match_wildcard(const char *pattern, const char *str);
 size_t match_strlcpy(char *, const substring_t *, size_t);
 char *match_strdup(const substring_t *);
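Editor's note: match_wildcard() comes from the "lib/parser.c: add match_wildcard" patch in this series, matching '*' against any character sequence and '?' against any single character. An illustrative recursive matcher with those semantics; the kernel's own implementation lives in lib/parser.c and is not this one:

#include <stdbool.h>
#include <stdio.h>

static bool wildcard(const char *p, const char *s)
{
	if (*p == '\0')
		return *s == '\0';
	if (*p == '*')		/* match empty, or consume one char of s */
		return wildcard(p + 1, s) || (*s && wildcard(p, s + 1));
	if (*s && (*p == '?' || *p == *s))
		return wildcard(p + 1, s + 1);
	return false;
}

int main(void)
{
	/* prints: 1 0 */
	printf("%d %d\n", wildcard("dm-*", "dm-0"), wildcard("sd?", "sda1"));
	return 0;
}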
include/linux/percpu.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_PERCPU_H
 #define __LINUX_PERCPU_H
 
+#include <linux/mmdebug.h>
 #include <linux/preempt.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
include/linux/printk.h
@@ -5,6 +5,7 @@
 #include <linux/init.h>
 #include <linux/kern_levels.h>
 #include <linux/linkage.h>
+#include <linux/cache.h>
 
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
@@ -253,17 +254,17 @@ extern asmlinkage void dump_stack(void) __cold;
  */
 
 #ifdef CONFIG_PRINTK
-#define printk_once(fmt, ...)			\
-({						\
-	static bool __print_once;		\
-						\
-	if (!__print_once) {			\
-		__print_once = true;		\
-		printk(fmt, ##__VA_ARGS__);	\
-	}					\
+#define printk_once(fmt, ...)					\
+({								\
+	static bool __print_once __read_mostly;			\
+								\
+	if (!__print_once) {					\
+		__print_once = true;				\
+		printk(fmt, ##__VA_ARGS__);			\
+	}							\
 })
 #else
-#define printk_once(fmt, ...)			\
+#define printk_once(fmt, ...)					\
 	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
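Editor's note: the guard variable is read on every printk_once() call but written at most once, so tagging it __read_mostly (hence the new linux/cache.h include) groups it with other rarely-written data away from write-hot cache lines. A compilable model of the once-only pattern, using a GNU statement expression as the kernel macro does:

#include <stdio.h>

#define print_once(...)				\
({						\
	static int __print_once;		\
						\
	if (!__print_once) {			\
		__print_once = 1;		\
		printf(__VA_ARGS__);		\
	}					\
})

int main(void)
{
	for (int i = 0; i < 3; i++)
		print_once("printed once, i=%d\n", i);	/* only i=0 */
	return 0;
}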
include/linux/ramfs.h
@@ -14,13 +14,6 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
 }
 #else
 extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
-extern unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
-						   unsigned long addr,
-						   unsigned long len,
-						   unsigned long pgoff,
-						   unsigned long flags);
-
-extern int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);
 #endif
 
 extern const struct file_operations ramfs_file_operations;
include/linux/sched.h
@@ -229,7 +229,7 @@ extern char ___assert_task_state[1 - 2*!!(
 /* get_task_state() */
 #define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
-				 __TASK_TRACED)
+				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
 
 #define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
@@ -391,22 +391,33 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 #endif
 
 
-extern void set_dumpable(struct mm_struct *mm, int value);
-extern int get_dumpable(struct mm_struct *mm);
-
 #define SUID_DUMP_DISABLE	0	/* No setuid dumping */
 #define SUID_DUMP_USER		1	/* Dump as user of process */
 #define SUID_DUMP_ROOT		2	/* Dump as root */
 
 /* mm flags */
-/* dumpable bits */
-#define MMF_DUMPABLE      0  /* core dump is permitted */
-#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */
 
+/* for SUID_DUMP_* above */
 #define MMF_DUMPABLE_BITS 2
 #define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
 
+extern void set_dumpable(struct mm_struct *mm, int value);
+/*
+ * This returns the actual value of the suid_dumpable flag. For things
+ * that are using this for checking for privilege transitions, it must
+ * test against SUID_DUMP_USER rather than treating it as a boolean
+ * value.
+ */
+static inline int __get_dumpable(unsigned long mm_flags)
+{
+	return mm_flags & MMF_DUMPABLE_MASK;
+}
+
+static inline int get_dumpable(struct mm_struct *mm)
+{
+	return __get_dumpable(mm->flags);
+}
 /* coredump filter bits */
 #define MMF_DUMP_ANON_PRIVATE	2
 #define MMF_DUMP_ANON_SHARED	3
@@ -1228,7 +1239,6 @@ struct task_struct {
 	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
 
-	unsigned did_exec:1;
 	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
 				 * execve */
 	unsigned in_iowait:1;
@@ -2284,8 +2294,6 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
 extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
-/* Allocate a new mm structure and copy contents from tsk->mm */
-extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
 extern int copy_thread(unsigned long, unsigned long, unsigned long,
 			struct task_struct *);
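Editor's note: the comment block that moves in with __get_dumpable() is the important part. SUID_DUMP_ROOT (2) is truthy but is not the "dump as user" case, so callers checking for privilege transitions must compare against SUID_DUMP_USER rather than treating the value as a boolean. A self-contained demonstration of the difference, reusing the definitions above:

#include <stdio.h>

#define SUID_DUMP_DISABLE 0
#define SUID_DUMP_USER    1
#define SUID_DUMP_ROOT    2
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

static int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

int main(void)
{
	unsigned long flags = SUID_DUMP_ROOT;	/* dumpable, but only as root */

	/* prints: boolean test: 1, correct test: 0 */
	printf("boolean test: %d, correct test: %d\n",
	       __get_dumpable(flags) != 0,
	       __get_dumpable(flags) == SUID_DUMP_USER);
	return 0;
}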
include/linux/sched/sysctl.h
@@ -99,4 +99,8 @@ extern int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
+extern int sysctl_numa_balancing(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp,
+				 loff_t *ppos);
+
 #endif /* _SCHED_SYSCTL_H */
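Editor's note: sysctl_numa_balancing() is, by its naming, the handler behind the kernel.numa_balancing knob, letting automatic NUMA balancing be toggled at runtime. A quick userspace read of the knob, assuming procfs and a NUMA_BALANCING kernel:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/numa_balancing", "r");
	int on = 0;

	if (!f) {
		perror("numa_balancing");
		return 1;
	}
	if (fscanf(f, "%d", &on) == 1)
		printf("automatic NUMA balancing: %s\n", on ? "on" : "off");
	fclose(f);
	return 0;
}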
include/linux/slab.h
@@ -513,7 +513,9 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 *
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
- * information about the currently limited memcgs in the system.
+ * information about the currently limited memcgs in the system. To allow the
+ * array to be accessed without taking any locks, on relocation we free the old
+ * version only after a grace period.
 *
 * Child caches will hold extra metadata needed for its operation. Fields are:
 *
@@ -528,7 +530,10 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 struct memcg_cache_params {
 	bool is_root_cache;
 	union {
-		struct kmem_cache *memcg_caches[0];
+		struct {
+			struct rcu_head rcu_head;
+			struct kmem_cache *memcg_caches[0];
+		};
 		struct {
 			struct mem_cgroup *memcg;
 			struct list_head list;
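Editor's note: pairing memcg_caches[] with an rcu_head inside the root-cache arm of the union is what makes the lock-free access described in the comment work: when the array is reallocated, the old one can be handed to RCU and freed only after a grace period. A userspace sketch of the layout using stand-in types (the kernel's structs have more fields):

#include <stdio.h>
#include <stddef.h>

struct rcu_head { void *next; void (*func)(void *); };
struct kmem_cache;
struct mem_cgroup;

struct memcg_cache_params {
	int is_root_cache;
	union {
		struct {			/* root cache */
			struct rcu_head rcu_head;
			struct kmem_cache *memcg_caches[0];
		};
		struct {			/* child cache */
			struct mem_cgroup *memcg;
			void *list_prev, *list_next;
		};
	};
};

int main(void)
{
	printf("memcg_caches[] starts at offset %zu of a %zu-byte header\n",
	       offsetof(struct memcg_cache_params, memcg_caches),
	       sizeof(struct memcg_cache_params));
	return 0;
}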
include/linux/w1-gpio.h
@@ -20,6 +20,7 @@ struct w1_gpio_platform_data {
 	unsigned int is_open_drain:1;
 	void (*enable_external_pullup)(int enable);
 	unsigned int ext_pullup_enable_pin;
+	unsigned int pullup_duration;
 };
 
 #endif /* _LINUX_W1_GPIO_H */