Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - fsnotify fix

 - poll() timeout fix

 - a few scripts/ tweaks

 - debugobjects updates

 - the (small) ocfs2 queue

 - Minor fixes to kernel/padata.c

 - Maybe half of the MM queue

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm, page_alloc: restore the original nodemask if the fast path allocation failed
  mm, page_alloc: uninline the bad page part of check_new_page()
  mm, page_alloc: don't duplicate code in free_pcp_prepare
  mm, page_alloc: defer debugging checks of pages allocated from the PCP
  mm, page_alloc: defer debugging checks of freed pages until a PCP drain
  cpuset: use static key better and convert to new API
  mm, page_alloc: inline pageblock lookup in page free fast paths
  mm, page_alloc: remove unnecessary variable from free_pcppages_bulk
  mm, page_alloc: pull out side effects from free_pages_check
  mm, page_alloc: un-inline the bad part of free_pages_check
  mm, page_alloc: check multiple page fields with a single branch
  mm, page_alloc: remove field from alloc_context
  mm, page_alloc: avoid looking up the first zone in a zonelist twice
  mm, page_alloc: shortcut watermark checks for order-0 pages
  mm, page_alloc: reduce cost of fair zone allocation policy retry
  mm, page_alloc: shorten the page allocator fast path
  mm, page_alloc: check once if a zone has isolated pageblocks
  mm, page_alloc: move __GFP_HARDWALL modifications out of the fastpath
  mm, page_alloc: simplify last cpupid reset
  mm, page_alloc: remove unnecessary initialisation from __alloc_pages_nodemask()
  ...
commit a05a70db34
122 changed files with 2310 additions and 1631 deletions
include/asm-generic/pgtable.h
@@ -806,4 +806,12 @@ static inline int pmd_clear_huge(pmd_t *pmd)
 #define io_remap_pfn_range remap_pfn_range
 #endif
 
+#ifndef has_transparent_hugepage
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage() 1
+#else
+#define has_transparent_hugepage() 0
+#endif
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
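For context (not part of the diff): because the fallback is guarded by #ifndef, an architecture can override it. A rough sketch of how that looks, paraphrasing the x86 side of this series (not quoted from this hunk), keying the check off a runtime CPU feature:

	/* sketch: arch override keyed off a CPU feature */
	#define has_transparent_hugepage has_transparent_hugepage
	static inline int has_transparent_hugepage(void)
	{
		return boot_cpu_has(X86_FEATURE_PSE);
	}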
include/linux/bootmem.h
@@ -83,34 +83,34 @@ extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long goal);
 extern void *__alloc_bootmem_nopanic(unsigned long size,
 				     unsigned long align,
-				     unsigned long goal);
+				     unsigned long goal) __malloc;
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
-				  unsigned long goal);
+				  unsigned long goal) __malloc;
 void *__alloc_bootmem_node_high(pg_data_t *pgdat,
 				unsigned long size,
 				unsigned long align,
-				unsigned long goal);
+				unsigned long goal) __malloc;
 extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 					  unsigned long size,
 					  unsigned long align,
-					  unsigned long goal);
+					  unsigned long goal) __malloc;
 void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				    unsigned long size,
 				    unsigned long align,
 				    unsigned long goal,
-				    unsigned long limit);
+				    unsigned long limit) __malloc;
 extern void *__alloc_bootmem_low(unsigned long size,
 				 unsigned long align,
-				 unsigned long goal);
+				 unsigned long goal) __malloc;
 void *__alloc_bootmem_low_nopanic(unsigned long size,
 				  unsigned long align,
-				  unsigned long goal);
+				  unsigned long goal) __malloc;
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
-				      unsigned long goal);
+				      unsigned long goal) __malloc;
 
 #ifdef CONFIG_NO_BOOTMEM
 /* We are using top down, so it is safe to use 0 here */
include/linux/compaction.h
@@ -39,12 +39,12 @@ extern int sysctl_compact_unevictable_allowed;
 
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
-			int alloc_flags, const struct alloc_context *ac,
-			enum migrate_mode mode, int *contended);
+			unsigned int alloc_flags, const struct alloc_context *ac,
+			enum migrate_mode mode, int *contended);
 extern void compact_pgdat(pg_data_t *pgdat, int order);
 extern void reset_isolation_suitable(pg_data_t *pgdat);
 extern unsigned long compaction_suitable(struct zone *zone, int order,
-					int alloc_flags, int classzone_idx);
+					unsigned int alloc_flags, int classzone_idx);
 
 extern void defer_compaction(struct zone *zone, int order);
 extern bool compaction_deferred(struct zone *zone, int order);
include/linux/compiler-gcc.h
@@ -142,6 +142,7 @@
 
 #if GCC_VERSION >= 30400
 #define __must_check __attribute__((warn_unused_result))
+#define __malloc __attribute__((__malloc__))
 #endif
 
 #if GCC_VERSION >= 40000
include/linux/compiler.h
@@ -357,6 +357,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 #define __deprecated_for_modules
 #endif
 
+#ifndef __malloc
+#define __malloc
+#endif
+
 /*
  * Allow us to avoid 'defined but not used' warnings on functions and data,
  * as well as force them to be emitted to the assembly file.
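For context (not part of the diff): __attribute__((__malloc__)) tells GCC that a non-NULL return value does not alias any pointer that is live at the call site, which sharpens alias analysis at every caller of the annotated allocators; the compiler.h fallback keeps non-GCC builds compiling. A minimal user-space sketch of the same pattern, with purely illustrative names:

	#include <stdlib.h>
	#include <string.h>

	#if defined(__GNUC__)
	#define my_malloc_attr __attribute__((__malloc__))
	#else
	#define my_malloc_attr
	#endif

	/* the attribute promises the result aliases no other live pointer */
	static char *dup_bytes(const char *src, size_t len) my_malloc_attr;

	static char *dup_bytes(const char *src, size_t len)
	{
		char *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}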
include/linux/cpuset.h
@@ -16,26 +16,26 @@
 
 #ifdef CONFIG_CPUSETS
 
-extern struct static_key cpusets_enabled_key;
+extern struct static_key_false cpusets_enabled_key;
 static inline bool cpusets_enabled(void)
 {
-	return static_key_false(&cpusets_enabled_key);
+	return static_branch_unlikely(&cpusets_enabled_key);
 }
 
 static inline int nr_cpusets(void)
 {
 	/* jump label reference count + the top-level cpuset */
-	return static_key_count(&cpusets_enabled_key) + 1;
+	return static_key_count(&cpusets_enabled_key.key) + 1;
 }
 
 static inline void cpuset_inc(void)
 {
-	static_key_slow_inc(&cpusets_enabled_key);
+	static_branch_inc(&cpusets_enabled_key);
 }
 
 static inline void cpuset_dec(void)
 {
-	static_key_slow_dec(&cpusets_enabled_key);
+	static_branch_dec(&cpusets_enabled_key);
 }
 
 extern int cpuset_init(void);
@@ -48,16 +48,25 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 void cpuset_init_current_mems_allowed(void);
 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
-extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
+extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
 
-static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
 {
-	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
+	if (cpusets_enabled())
+		return __cpuset_node_allowed(node, gfp_mask);
+	return true;
 }
 
-static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 {
-	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
+	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
 }
 
+static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+	if (cpusets_enabled())
+		return __cpuset_zone_allowed(z, gfp_mask);
+	return true;
+}
+
 extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -172,14 +181,19 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 	return 1;
 }
 
-static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
+static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
 {
-	return 1;
+	return true;
 }
 
-static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 {
-	return 1;
+	return true;
 }
 
+static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+{
+	return true;
+}
+
 static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
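For context (not part of the diff): this is the standard new-style static-branch pattern. A static_key_false key compiles cpusets_enabled() down to a patched jump that costs nothing while no cpusets exist, and static_branch_inc()/static_branch_dec() keep a reference count on the key. The same pattern, condensed, with a hypothetical key name:

	#include <linux/jump_label.h>

	static DEFINE_STATIC_KEY_FALSE(my_feature_key);

	static inline bool my_feature_enabled(void)
	{
		/* a nop in the hot path until the key is enabled */
		return static_branch_unlikely(&my_feature_key);
	}

	void my_feature_get(void)
	{
		static_branch_inc(&my_feature_key);	/* reference counted */
	}

	void my_feature_put(void)
	{
		static_branch_dec(&my_feature_key);
	}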
include/linux/debugobjects.h
@@ -38,8 +38,10 @@ struct debug_obj {
  * @name:		name of the object typee
  * @debug_hint:		function returning address, which have associated
  *			kernel symbol, to allow identify the object
+ * @is_static_object:	return true if the obj is static, otherwise return false
  * @fixup_init:		fixup function, which is called when the init check
- *			fails
+ *			fails. All fixup functions must return true if fixup
+ *			was successful, otherwise return false
  * @fixup_activate:	fixup function, which is called when the activate check
  *			fails
  * @fixup_destroy:	fixup function, which is called when the destroy check
@@ -51,12 +53,13 @@ struct debug_obj {
  */
 struct debug_obj_descr {
 	const char		*name;
-	void *(*debug_hint)	(void *addr);
-	int (*fixup_init)	(void *addr, enum debug_obj_state state);
-	int (*fixup_activate)	(void *addr, enum debug_obj_state state);
-	int (*fixup_destroy)	(void *addr, enum debug_obj_state state);
-	int (*fixup_free)	(void *addr, enum debug_obj_state state);
-	int (*fixup_assert_init)(void *addr, enum debug_obj_state state);
+	void *(*debug_hint)(void *addr);
+	bool (*is_static_object)(void *addr);
+	bool (*fixup_init)(void *addr, enum debug_obj_state state);
+	bool (*fixup_activate)(void *addr, enum debug_obj_state state);
+	bool (*fixup_destroy)(void *addr, enum debug_obj_state state);
+	bool (*fixup_free)(void *addr, enum debug_obj_state state);
+	bool (*fixup_assert_init)(void *addr, enum debug_obj_state state);
 };
 
 #ifdef CONFIG_DEBUG_OBJECTS
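For context (not part of the diff): every fixup callback now reports as a bool whether it actually repaired anything, and the new is_static_object() hook replaces the old convention of inferring staticness elsewhere. A sketch of a descriptor under the new signatures, with a hypothetical object type (the tentative definition keeps the forward reference legal):

	#include <linux/debugobjects.h>

	struct my_obj { int state; };

	static struct debug_obj_descr my_obj_descr;

	/* called when activation is attempted on an untracked object */
	static bool my_obj_fixup_activate(void *addr, enum debug_obj_state state)
	{
		struct my_obj *obj = addr;

		if (state != ODEBUG_STATE_NOTAVAILABLE)
			return false;			/* nothing was fixed */

		debug_object_init(obj, &my_obj_descr);	/* hypothetical recovery */
		return true;				/* fixup succeeded */
	}

	static struct debug_obj_descr my_obj_descr = {
		.name		= "my_obj",
		.fixup_activate	= my_obj_fixup_activate,
	};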
include/linux/device.h
@@ -609,14 +609,14 @@ typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
 
 #ifdef CONFIG_DEBUG_DEVRES
 extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
-				 int nid, const char *name);
+				 int nid, const char *name) __malloc;
 #define devres_alloc(release, size, gfp) \
 	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
 #define devres_alloc_node(release, size, gfp, nid) \
 	__devres_alloc_node(release, size, gfp, nid, #release)
 #else
 extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
-			       int nid);
+			       int nid) __malloc;
 static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
 {
 	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
@@ -648,12 +648,12 @@ extern void devres_remove_group(struct device *dev, void *id);
 extern int devres_release_group(struct device *dev, void *id);
 
 /* managed devm_k.alloc/kfree for device drivers */
-extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
+extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp) __malloc;
 extern __printf(3, 0)
 char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
-		      va_list ap);
+		      va_list ap) __malloc;
 extern __printf(3, 4)
-char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
+char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...) __malloc;
 static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
 {
 	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -671,7 +671,7 @@ static inline void *devm_kcalloc(struct device *dev,
 	return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
 }
 extern void devm_kfree(struct device *dev, void *p);
-extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp);
+extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
 extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
 			  gfp_t gfp);
 
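For context (not part of the diff): nothing changes for callers; the devres allocators are simply recognized as returning fresh memory. Typical usage, shown only as a sketch (hypothetical driver-private struct):

	struct my_priv { int irq; };

	static int my_probe(struct device *dev)
	{
		struct my_priv *priv;

		/* freed automatically when the device is unbound */
		priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		dev_set_drvdata(dev, priv);
		return 0;
	}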
include/linux/fsnotify_backend.h
@@ -359,8 +359,6 @@ extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
 extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
-/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/
-extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
-/* run all the marks in a group, and clear all of the marks in the group */
+/* run all the marks in a group, and flag them to be freed */
 extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
 extern void fsnotify_get_mark(struct fsnotify_mark *mark);
 extern void fsnotify_put_mark(struct fsnotify_mark *mark);
 extern void fsnotify_unmount_inodes(struct super_block *sb);
include/linux/huge_mm.h
@@ -28,9 +28,7 @@ extern int zap_huge_pmd(struct mmu_gather *tlb,
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			unsigned long addr, unsigned long end,
 			unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma,
-			 struct vm_area_struct *new_vma,
-			 unsigned long old_addr,
+extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
 			 unsigned long new_addr, unsigned long old_end,
 			 pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
include/linux/hugetlb.h
@@ -338,6 +338,7 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 /* arch callback */
 int __init alloc_bootmem_huge_page(struct hstate *h);
 
+void __init hugetlb_bad_size(void);
 void __init hugetlb_add_hstate(unsigned order);
 struct hstate *size_to_hstate(unsigned long size);
 
include/linux/hugetlb_inline.h
@@ -5,16 +5,16 @@
 
 #include <linux/mm.h>
 
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
 	return !!(vma->vm_flags & VM_HUGETLB);
 }
 
 #else
 
-static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
+static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
-	return 0;
+	return false;
 }
 
 #endif
include/linux/kernel.h
@@ -412,9 +412,9 @@ extern __printf(3, 4)
 int scnprintf(char *buf, size_t size, const char *fmt, ...);
 extern __printf(3, 0)
 int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3)
+extern __printf(2, 3) __malloc
 char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0)
+extern __printf(2, 0) __malloc
 char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
 extern __printf(2, 0)
 const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
include/linux/memcontrol.h
@@ -658,12 +658,6 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 	return 0;
 }
 
-static inline void
-mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-			   int increment)
-{
-}
-
 static inline unsigned long
 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
 			     int nid, unsigned int lru_mask)
include/linux/memory_hotplug.h
@@ -247,16 +247,16 @@ static inline void mem_hotplug_done(void) {}
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
 
-extern int is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
+extern bool is_mem_section_removable(unsigned long pfn, unsigned long nr_pages);
 extern void try_offline_node(int nid);
 extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages);
 extern void remove_memory(int nid, u64 start, u64 size);
 
 #else
-static inline int is_mem_section_removable(unsigned long pfn,
+static inline bool is_mem_section_removable(unsigned long pfn,
 					unsigned long nr_pages)
 {
-	return 0;
+	return false;
 }
 
 static inline void try_offline_node(int nid) {}
include/linux/mempolicy.h
@@ -172,14 +172,14 @@ extern int mpol_parse_str(char *str, struct mempolicy **mpol);
 extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
 
 /* Check if a vma is migratable */
-static inline int vma_migratable(struct vm_area_struct *vma)
+static inline bool vma_migratable(struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-		return 0;
+		return false;
 
 #ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 	if (vma->vm_flags & VM_HUGETLB)
-		return 0;
+		return false;
 #endif
 
 	/*
@@ -190,8 +190,8 @@ static inline int vma_migratable(struct vm_area_struct *vma)
 	if (vma->vm_file &&
 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
 						< policy_zone)
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
 extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
@@ -228,6 +228,12 @@ static inline void mpol_free_shared_policy(struct shared_policy *p)
 {
 }
 
+static inline struct mempolicy *
+mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+{
+	return NULL;
+}
+
 #define vma_policy(vma) NULL
 
 static inline int
include/linux/mempool.h
@@ -5,6 +5,7 @@
 #define _LINUX_MEMPOOL_H
 
 #include <linux/wait.h>
+#include <linux/compiler.h>
 
 struct kmem_cache;
 
@@ -31,7 +32,7 @@ extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
 
 extern int mempool_resize(mempool_t *pool, int new_min_nr);
 extern void mempool_destroy(mempool_t *pool);
-extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
+extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
 extern void mempool_free(void *element, mempool_t *pool);
 
 /*
include/linux/mm.h
@@ -447,14 +447,14 @@ unsigned long vmalloc_to_pfn(const void *addr);
  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
  * is no special casing required.
  */
-static inline int is_vmalloc_addr(const void *x)
+static inline bool is_vmalloc_addr(const void *x)
 {
 #ifdef CONFIG_MMU
 	unsigned long addr = (unsigned long)x;
 
 	return addr >= VMALLOC_START && addr < VMALLOC_END;
 #else
-	return 0;
+	return false;
 #endif
 }
 #ifdef CONFIG_MMU
@@ -734,7 +734,7 @@ static inline void get_page(struct page *page)
 	page = compound_head(page);
 	/*
 	 * Getting a normal page or the head of a compound page
-	 * requires to already have an elevated page->_count.
+	 * requires to already have an elevated page->_refcount.
 	 */
 	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
 	page_ref_inc(page);
@@ -850,10 +850,7 @@ extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 
 static inline void page_cpupid_reset_last(struct page *page)
 {
-	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
-
-	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
-	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
+	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
 }
 #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 #else /* !CONFIG_NUMA_BALANCING */
@@ -1032,26 +1029,7 @@ static inline pgoff_t page_file_index(struct page *page)
 	return page->index;
 }
 
-/*
- * Return true if this page is mapped into pagetables.
- * For compound page it returns true if any subpage of compound page is mapped.
- */
-static inline bool page_mapped(struct page *page)
-{
-	int i;
-	if (likely(!PageCompound(page)))
-		return atomic_read(&page->_mapcount) >= 0;
-	page = compound_head(page);
-	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
-		return true;
-	if (PageHuge(page))
-		return false;
-	for (i = 0; i < hpage_nr_pages(page); i++) {
-		if (atomic_read(&page[i]._mapcount) >= 0)
-			return true;
-	}
-	return false;
-}
+bool page_mapped(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
include/linux/mm_inline.h
@@ -22,22 +22,34 @@ static inline int page_is_file_cache(struct page *page)
 	return !PageSwapBacked(page);
 }
 
+static __always_inline void __update_lru_size(struct lruvec *lruvec,
+				enum lru_list lru, int nr_pages)
+{
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
+}
+
+static __always_inline void update_lru_size(struct lruvec *lruvec,
+				enum lru_list lru, int nr_pages)
+{
+#ifdef CONFIG_MEMCG
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+#else
+	__update_lru_size(lruvec, lru, nr_pages);
+#endif
+}
+
 static __always_inline void add_page_to_lru_list(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
-	int nr_pages = hpage_nr_pages(page);
-	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+	update_lru_size(lruvec, lru, hpage_nr_pages(page));
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
 static __always_inline void del_page_from_lru_list(struct page *page,
 				struct lruvec *lruvec, enum lru_list lru)
 {
-	int nr_pages = hpage_nr_pages(page);
-	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
+	update_lru_size(lruvec, lru, -hpage_nr_pages(page));
 }
 
 /**
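For context (not part of the diff): update_lru_size() now bundles the zone counter update with the memcg per-lruvec bookkeeping (when CONFIG_MEMCG is enabled), so the add and delete paths cannot drift apart. Callers pass a signed page delta; a sketch of a hypothetical helper built on the functions above:

	/* move a page between LRU lists, keeping the counters balanced */
	static inline void move_page_lru(struct page *page, struct lruvec *lruvec,
					 enum lru_list from, enum lru_list to)
	{
		list_move(&page->lru, &lruvec->lists[to]);
		update_lru_size(lruvec, from, -hpage_nr_pages(page));
		update_lru_size(lruvec, to, hpage_nr_pages(page));
	}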
include/linux/mm_types.h
@@ -73,9 +73,9 @@ struct page {
 			unsigned long counters;
 #else
 			/*
-			 * Keep _count separate from slub cmpxchg_double data.
-			 * As the rest of the double word is protected by
-			 * slab_lock but _count is not.
+			 * Keep _refcount separate from slub cmpxchg_double
+			 * data. As the rest of the double word is protected by
+			 * slab_lock but _refcount is not.
 			 */
 			unsigned counters;
 #endif
@@ -97,7 +97,11 @@ struct page {
 				};
 				int units;	/* SLOB */
 			};
-			atomic_t _count;	/* Usage count, see below. */
+			/*
+			 * Usage count, *USE WRAPPER FUNCTION*
+			 * when manual accounting. See page_ref.h
+			 */
+			atomic_t _refcount;
 		};
 		unsigned int active;	/* SLAB */
 	};
@@ -248,7 +252,7 @@ struct page_frag_cache {
 	__u32 offset;
 #endif
 	/* we maintain a pagecount bias, so that we dont dirty cache line
-	 * containing page->_count every time we allocate a fragment.
+	 * containing page->_refcount every time we allocate a fragment.
 	 */
 	unsigned int pagecnt_bias;
 	bool pfmemalloc;
include/linux/mmzone.h
@@ -85,13 +85,6 @@ extern int page_group_by_mobility_disabled;
 	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
 			PB_migrate_end, MIGRATETYPE_MASK)
 
-static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
-{
-	BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
-	return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
-					MIGRATETYPE_MASK);
-}
-
 struct free_area {
 	struct list_head	free_list[MIGRATE_TYPES];
 	unsigned long		nr_free;
@@ -747,7 +740,8 @@ extern struct mutex zonelists_mutex;
 void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
 void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
 bool zone_watermark_ok(struct zone *z, unsigned int order,
-		unsigned long mark, int classzone_idx, int alloc_flags);
+		unsigned long mark, int classzone_idx,
+		unsigned int alloc_flags);
 bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
 		unsigned long mark, int classzone_idx);
 enum memmap_context {
@@ -828,10 +822,7 @@ static inline int is_highmem_idx(enum zone_type idx)
 static inline int is_highmem(struct zone *zone)
 {
 #ifdef CONFIG_HIGHMEM
-	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
-	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
-	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
-		zone_movable_is_highmem());
+	return is_highmem_idx(zone_idx(zone));
 #else
 	return 0;
 #endif
@@ -922,6 +913,10 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
 #endif /* CONFIG_NUMA */
 }
 
+struct zoneref *__next_zones_zonelist(struct zoneref *z,
+					enum zone_type highest_zoneidx,
+					nodemask_t *nodes);
+
 /**
  * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
  * @z - The cursor used as a starting point for the search
@@ -934,9 +929,14 @@ static inline int zonelist_node_idx(struct zoneref *zoneref)
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
-struct zoneref *next_zones_zonelist(struct zoneref *z,
+static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
 					enum zone_type highest_zoneidx,
-					nodemask_t *nodes);
+					nodemask_t *nodes)
+{
+	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
+		return z;
+	return __next_zones_zonelist(z, highest_zoneidx, nodes);
+}
 
 /**
  * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
@@ -952,13 +952,10 @@ struct zoneref *next_zones_zonelist(struct zoneref *z,
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
 					enum zone_type highest_zoneidx,
-					nodemask_t *nodes,
-					struct zone **zone)
+					nodemask_t *nodes)
 {
-	struct zoneref *z = next_zones_zonelist(zonelist->_zonerefs,
+	return next_zones_zonelist(zonelist->_zonerefs,
 						highest_zoneidx, nodes);
-	*zone = zonelist_zone(z);
-	return z;
 }
 
 /**
@@ -973,10 +970,17 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
  * within a given nodemask
  */
 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
-	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
+	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
 		zone;							\
 		z = next_zones_zonelist(++z, highidx, nodemask),	\
-			zone = zonelist_zone(z))			\
+			zone = zonelist_zone(z))
+
+#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+	for (zone = z->zone;	\
+		zone;							\
+		z = next_zones_zonelist(++z, highidx, nodemask),	\
+			zone = zonelist_zone(z))
+
 
 /**
  * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
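For context (not part of the diff): first_zones_zonelist() no longer returns the zone through a struct zone ** output parameter; callers read it from the returned cursor with zonelist_zone(), which is exactly what the reworked iteration macros above do. A sketch of a caller updated to the new convention (hypothetical wrapper):

	static struct zone *pick_first_zone(struct zonelist *zonelist,
					    enum zone_type highest_zoneidx,
					    nodemask_t *nodes)
	{
		struct zoneref *z;

		/* the zone now comes from the returned zoneref cursor ... */
		z = first_zones_zonelist(zonelist, highest_zoneidx, nodes);

		/* ... rather than from an extra output argument */
		return zonelist_zone(z);
	}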
include/linux/nodemask.h
@@ -43,8 +43,10 @@
 *
 * int first_node(mask)			Number lowest set bit, or MAX_NUMNODES
 * int next_node(node, mask)		Next node past 'node', or MAX_NUMNODES
+ * int next_node_in(node, mask)		Next node past 'node', or wrap to first,
+ *					or MAX_NUMNODES
 * int first_unset_node(mask)		First node not set in mask, or
- *					MAX_NUMNODES.
+ *					MAX_NUMNODES
 *
 * nodemask_t nodemask_of_node(node)	Return nodemask with bit 'node' set
 * NODE_MASK_ALL			Initializer - all bits set
@@ -259,6 +261,13 @@ static inline int __next_node(int n, const nodemask_t *srcp)
 	return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
 }
 
+/*
+ * Find the next present node in src, starting after node n, wrapping around to
+ * the first node in src if needed. Returns MAX_NUMNODES if src is empty.
+ */
+#define next_node_in(n, src) __next_node_in((n), &(src))
+int __next_node_in(int node, const nodemask_t *srcp);
+
 static inline void init_nodemask_of_node(nodemask_t *mask, int node)
 {
 	nodes_clear(*mask);
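For context (not part of the diff): next_node_in() differs from next_node() by wrapping around to the first set bit, which turns round-robin node selection into a one-liner. Its intended use, sketched with a hypothetical cursor:

	/* advance a round-robin allocation cursor over the allowed nodes */
	static int advance_node(int cur, const nodemask_t *allowed)
	{
		int next = next_node_in(cur, *allowed);

		/* MAX_NUMNODES means the mask was empty */
		return next < MAX_NUMNODES ? next : cur;
	}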
include/linux/oom.h
@@ -72,6 +72,14 @@ static inline bool oom_task_origin(const struct task_struct *p)
 
 extern void mark_oom_victim(struct task_struct *tsk);
 
+#ifdef CONFIG_MMU
+extern void try_oom_reaper(struct task_struct *tsk);
+#else
+static inline void try_oom_reaper(struct task_struct *tsk)
+{
+}
+#endif
+
 extern unsigned long oom_badness(struct task_struct *p,
 		struct mem_cgroup *memcg, const nodemask_t *nodemask,
 		unsigned long totalpages);
include/linux/padata.h
@@ -175,11 +175,6 @@ extern int padata_do_parallel(struct padata_instance *pinst,
 extern void padata_do_serial(struct padata_priv *padata);
 extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
 			      cpumask_var_t cpumask);
-extern int padata_set_cpumasks(struct padata_instance *pinst,
-			       cpumask_var_t pcpumask,
-			       cpumask_var_t cbcpumask);
-extern int padata_add_cpu(struct padata_instance *pinst, int cpu, int mask);
-extern int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask);
 extern int padata_start(struct padata_instance *pinst);
 extern void padata_stop(struct padata_instance *pinst);
 extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
include/linux/page-flags.h
@@ -371,10 +371,15 @@ PAGEFLAG(Idle, idle, PF_ANY)
 #define PAGE_MAPPING_KSM	2
 #define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 
+static __always_inline int PageAnonHead(struct page *page)
+{
+	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
 static __always_inline int PageAnon(struct page *page)
 {
 	page = compound_head(page);
-	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+	return PageAnonHead(page);
 }
 
 #ifdef CONFIG_KSM
include/linux/page_ref.h
@@ -63,17 +63,17 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
 
 static inline int page_ref_count(struct page *page)
 {
-	return atomic_read(&page->_count);
+	return atomic_read(&page->_refcount);
 }
 
 static inline int page_count(struct page *page)
 {
-	return atomic_read(&compound_head(page)->_count);
+	return atomic_read(&compound_head(page)->_refcount);
 }
 
 static inline void set_page_count(struct page *page, int v)
 {
-	atomic_set(&page->_count, v);
+	atomic_set(&page->_refcount, v);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
 		__page_ref_set(page, v);
 }
@@ -89,35 +89,35 @@ static inline void init_page_count(struct page *page)
 
 static inline void page_ref_add(struct page *page, int nr)
 {
-	atomic_add(nr, &page->_count);
+	atomic_add(nr, &page->_refcount);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 		__page_ref_mod(page, nr);
 }
 
 static inline void page_ref_sub(struct page *page, int nr)
 {
-	atomic_sub(nr, &page->_count);
+	atomic_sub(nr, &page->_refcount);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 		__page_ref_mod(page, -nr);
 }
 
 static inline void page_ref_inc(struct page *page)
 {
-	atomic_inc(&page->_count);
+	atomic_inc(&page->_refcount);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 		__page_ref_mod(page, 1);
 }
 
 static inline void page_ref_dec(struct page *page)
 {
-	atomic_dec(&page->_count);
+	atomic_dec(&page->_refcount);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
 		__page_ref_mod(page, -1);
 }
 
 static inline int page_ref_sub_and_test(struct page *page, int nr)
 {
-	int ret = atomic_sub_and_test(nr, &page->_count);
+	int ret = atomic_sub_and_test(nr, &page->_refcount);
 
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
 		__page_ref_mod_and_test(page, -nr, ret);
@@ -126,7 +126,7 @@ static inline int page_ref_sub_and_test(struct page *page, int nr)
 
 static inline int page_ref_dec_and_test(struct page *page)
 {
-	int ret = atomic_dec_and_test(&page->_count);
+	int ret = atomic_dec_and_test(&page->_refcount);
 
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
 		__page_ref_mod_and_test(page, -1, ret);
@@ -135,7 +135,7 @@ static inline int page_ref_dec_and_test(struct page *page)
 
 static inline int page_ref_dec_return(struct page *page)
 {
-	int ret = atomic_dec_return(&page->_count);
+	int ret = atomic_dec_return(&page->_refcount);
 
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
 		__page_ref_mod_and_return(page, -1, ret);
@@ -144,7 +144,7 @@ static inline int page_ref_dec_return(struct page *page)
 
 static inline int page_ref_add_unless(struct page *page, int nr, int u)
 {
-	int ret = atomic_add_unless(&page->_count, nr, u);
+	int ret = atomic_add_unless(&page->_refcount, nr, u);
 
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
 		__page_ref_mod_unless(page, nr, ret);
@@ -153,7 +153,7 @@ static inline int page_ref_add_unless(struct page *page, int nr, int u)
 
 static inline int page_ref_freeze(struct page *page, int count)
 {
-	int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);
 
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
 		__page_ref_freeze(page, count, ret);
@@ -165,7 +165,7 @@ static inline void page_ref_unfreeze(struct page *page, int count)
 	VM_BUG_ON_PAGE(page_count(page) != 0, page);
 	VM_BUG_ON(count == 0);
 
-	atomic_set(&page->_count, count);
+	atomic_set(&page->_refcount, count);
 	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
 		__page_ref_unfreeze(page, count);
 }
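For context (not part of the diff): with _count renamed to _refcount, all modifications are expected to funnel through these page_ref_* wrappers so the page_ref tracepoints observe every change; open-coded atomic operations on the field would bypass them. The usual get/put pairing is unchanged, e.g.:

	static void touch_page(struct page *page)
	{
		get_page(page);		/* page_ref_inc() underneath, traced */

		/* ... safely use the page ... */

		put_page(page);		/* traced drop; frees on last reference */
	}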
include/linux/pagemap.h
@@ -90,12 +90,12 @@ void release_pages(struct page **pages, int nr, bool cold);
 
 /*
  * speculatively take a reference to a page.
- * If the page is free (_count == 0), then _count is untouched, and 0
- * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ * If the page is free (_refcount == 0), then _refcount is untouched, and 0
+ * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
- * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
@@ -111,7 +111,7 @@ void release_pages(struct page **pages, int nr, bool cold);
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
- * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
include/linux/poll.h
@@ -96,7 +96,7 @@ extern void poll_initwait(struct poll_wqueues *pwq);
 extern void poll_freewait(struct poll_wqueues *pwq);
 extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
 				 ktime_t *expires, unsigned long slack);
-extern u64 select_estimate_accuracy(struct timespec *tv);
+extern u64 select_estimate_accuracy(struct timespec64 *tv);
 
 
 static inline int poll_schedule(struct poll_wqueues *pwq, int state)
@@ -153,12 +153,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
 
 #define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
 
-extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
+extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
 extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
-		       struct timespec *end_time);
+		       struct timespec64 *end_time);
 extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
-			   fd_set __user *exp, struct timespec *end_time);
+			   fd_set __user *exp, struct timespec64 *end_time);
 
-extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
+extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
+				   long nsec);
 
 #endif /* _LINUX_POLL_H */
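For context (not part of the diff): carrying struct timespec64 end-of-wait times keeps the select/poll deadline arithmetic y2038-safe on 32-bit systems. The conversion pattern for a caller of these helpers, as a condensed sketch (hypothetical wrapper):

	static int wait_with_deadline(int n, fd_set_bits *fds)
	{
		struct timespec64 end_time;

		/* a relative 5s timeout becomes an absolute 64-bit-safe deadline */
		if (poll_select_set_timeout(&end_time, 5, 0))
			return -EINVAL;

		return do_select(n, fds, &end_time);
	}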
include/linux/slab.h
@@ -315,8 +315,8 @@ static __always_inline int kmalloc_index(size_t size)
 }
 #endif /* !CONFIG_SLOB */
 
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment;
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment;
+void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
 void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
@@ -339,8 +339,8 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 }
 
 #ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
+void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
@@ -354,12 +354,12 @@ static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
 #endif
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment;
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;
 
 #ifdef CONFIG_NUMA
 extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
 					 gfp_t gfpflags,
-					 int node, size_t size) __assume_slab_alignment;
+					 int node, size_t size) __assume_slab_alignment __malloc;
 #else
 static __always_inline void *
 kmem_cache_alloc_node_trace(struct kmem_cache *s,
@@ -392,10 +392,10 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment;
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
 #else
 static __always_inline void *
 kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
include/linux/slab_def.h
@@ -80,6 +80,10 @@ struct kmem_cache {
 	struct kasan_cache kasan_info;
 #endif
 
+#ifdef CONFIG_SLAB_FREELIST_RANDOM
+	void *random_seq;
+#endif
+
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
include/linux/string.h
@@ -119,7 +119,7 @@ char *strreplace(char *s, char old, char new);
 
 extern void kfree_const(const void *x);
 
-extern char *kstrdup(const char *s, gfp_t gfp);
+extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
 extern const char *kstrdup_const(const char *s, gfp_t gfp);
 extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
 extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
include/linux/time64.h
@@ -65,7 +65,6 @@ static inline struct itimerspec64 itimerspec_to_itimerspec64(struct itimerspec *its)
 # define timespec64_equal		timespec_equal
 # define timespec64_compare		timespec_compare
 # define set_normalized_timespec64	set_normalized_timespec
-# define timespec64_add_safe		timespec_add_safe
 # define timespec64_add			timespec_add
 # define timespec64_sub			timespec_sub
 # define timespec64_valid		timespec_valid
@@ -134,15 +133,6 @@ static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
 
 extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
 
-/*
- * timespec64_add_safe assumes both values are positive and checks for
- * overflow. It will return TIME_T_MAX if the returned value would be
- * smaller then either of the arguments.
- */
-extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
-		const struct timespec64 rhs);
-
-
 static inline struct timespec64 timespec64_add(struct timespec64 lhs,
 						struct timespec64 rhs)
 {
@@ -224,4 +214,11 @@ static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
 
 #endif
 
+/*
+ * timespec64_add_safe assumes both values are positive and checks for
+ * overflow. It will return TIME64_MAX in case of overflow.
+ */
+extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
+					     const struct timespec64 rhs);
+
 #endif /* _LINUX_TIME64_H */
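For context (not part of the diff): the relocated declaration also corrects the overflow contract; the 64-bit version saturates at TIME64_MAX rather than the stale TIME_T_MAX wording of the old comment. An illustration of the saturating behavior (sketch):

	static void demo_saturating_add(void)
	{
		struct timespec64 a = { .tv_sec = TIME64_MAX - 1, .tv_nsec = 0 };
		struct timespec64 b = { .tv_sec = 10, .tv_nsec = 0 };
		struct timespec64 sum = timespec64_add_safe(a, b);

		/* the result saturates rather than wrapping */
		WARN_ON(sum.tv_sec != TIME64_MAX);
	}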
include/linux/vmstat.h
@@ -163,12 +163,10 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
 #ifdef CONFIG_NUMA
 
 extern unsigned long node_page_state(int node, enum zone_stat_item item);
-extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);
 
 #else
 
 #define node_page_state(node, item) global_page_state(item)
-#define zone_statistics(_zl, _z, gfp) do { } while (0)
 
 #endif /* CONFIG_NUMA */
 
@@ -193,6 +191,10 @@ void quiet_vmstat(void);
 void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
+struct ctl_table;
+int vmstat_refresh(struct ctl_table *, int write,
+		   void __user *buffer, size_t *lenp, loff_t *ppos);
+
 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
 
 int calculate_pressure_threshold(struct zone *zone);