Merge branch 'akpm' (patches from Andrew)

Merge misc updates from Andrew Morton:

 - a few misc things

 - the rest of MM

 - remove flex_arrays, replace with new simple radix-tree implementation

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (38 commits)
  Drop flex_arrays
  sctp: convert to genradix
  proc: commit to genradix
  generic radix trees
  selinux: convert to kvmalloc
  md: convert to kvmalloc
  openvswitch: convert to kvmalloc
  of: fix kmemleak crash caused by imbalance in early memory reservation
  mm: memblock: update comments and kernel-doc
  memblock: split checks whether a region should be skipped to a helper function
  memblock: remove memblock_{set,clear}_region_flags
  memblock: drop memblock_alloc_*_nopanic() variants
  memblock: memblock_alloc_try_nid: don't panic
  treewide: add checks for the return value of memblock_alloc*()
  swiotlb: add checks for the return value of memblock_alloc*()
  init/main: add checks for the return value of memblock_alloc*()
  mm/percpu: add checks for the return value of memblock_alloc*()
  sparc: add checks for the return value of memblock_alloc*()
  ia64: add checks for the return value of memblock_alloc*()
  arch: don't memset(0) memory returned by memblock_alloc()
  ...
Linus Torvalds 2019-03-12 10:39:53 -07:00
commit a667cb7a94
159 changed files with 1654 additions and 1710 deletions

include/linux/flex_array.h (deleted)

@@ -1,149 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _FLEX_ARRAY_H
#define _FLEX_ARRAY_H

#include <linux/types.h>
#include <linux/reciprocal_div.h>
#include <asm/page.h>

#define FLEX_ARRAY_PART_SIZE PAGE_SIZE
#define FLEX_ARRAY_BASE_SIZE PAGE_SIZE

struct flex_array_part;

/*
 * This is meant to replace cases where an array-like
 * structure has gotten too big to fit into kmalloc()
 * and the developer is getting tempted to use
 * vmalloc().
 */

struct flex_array {
        union {
                struct {
                        int element_size;
                        int total_nr_elements;
                        int elems_per_part;
                        struct reciprocal_value reciprocal_elems;
                        struct flex_array_part *parts[];
                };
                /*
                 * This little trick makes sure that
                 * sizeof(flex_array) == PAGE_SIZE
                 */
                char padding[FLEX_ARRAY_BASE_SIZE];
        };
};

/* Number of bytes left in base struct flex_array, excluding metadata */
#define FLEX_ARRAY_BASE_BYTES_LEFT \
        (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts))

/* Number of pointers in base to struct flex_array_part pages */
#define FLEX_ARRAY_NR_BASE_PTRS \
        (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *))

/* Number of elements of size that fit in struct flex_array_part */
#define FLEX_ARRAY_ELEMENTS_PER_PART(size) \
        (FLEX_ARRAY_PART_SIZE / size)

/*
 * Defines a statically allocated flex array and ensures its parameters are
 * valid.
 */
#define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \
        struct flex_array __arrayname = { { { \
                .element_size = (__element_size), \
                .total_nr_elements = (__total), \
        } } }; \
        static inline void __arrayname##_invalid_parameter(void) \
        { \
                BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \
                                FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
        }

/**
 * flex_array_alloc() - Creates a flexible array.
 * @element_size: individual object size.
 * @total: maximum number of objects which can be stored.
 * @flags: GFP flags
 *
 * Return: Returns a new struct flex_array on success, or NULL on failure.
 */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                                    gfp_t flags);

/**
 * flex_array_prealloc() - Ensures that memory for the elements indexed in the
 * range defined by start and nr_elements has been allocated.
 * @fa: array to allocate memory to.
 * @start: index of the first element to preallocate.
 * @nr_elements: number of elements to be allocated.
 * @flags: GFP flags
 *
 */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
                        unsigned int nr_elements, gfp_t flags);

/**
 * flex_array_free() - Frees the array along with all of its parts.
 * @fa: array to be freed.
 */
void flex_array_free(struct flex_array *fa);

/**
 * flex_array_free_parts() - Removes all elements of a flexible array, but
 * leaves the array itself in place.
 * @fa: array to be emptied.
 */
void flex_array_free_parts(struct flex_array *fa);

/**
 * flex_array_put() - Stores data into a flexible array.
 * @fa: array where element is to be stored.
 * @element_nr: position to copy, must be less than the maximum specified when
 *              the array was created.
 * @src: data source to be copied into the array.
 * @flags: GFP flags
 *
 * Return: Returns zero on success, a negative error code otherwise.
 */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
                   gfp_t flags);

/**
 * flex_array_clear() - Clears an individual element in the array, sets the
 * given element to FLEX_ARRAY_FREE.
 * @element_nr: element position to clear.
 * @fa: array to which element to be cleared belongs.
 *
 * Return: Returns zero on success, -EINVAL otherwise.
 */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr);

/**
 * flex_array_get() - Retrieves data from a flexible array.
 * @element_nr: Element position to retrieve data from.
 * @fa: array from which data is to be retrieved.
 *
 * Return: Returns a pointer to the data element, or NULL if that
 * particular element has never been allocated.
 */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);

/**
 * flex_array_shrink() - Reduces the allocated size of an array.
 * @fa: array to shrink.
 *
 * Return: Returns number of pages of memory actually freed.
 */
int flex_array_shrink(struct flex_array *fa);

#define flex_array_put_ptr(fa, nr, src, gfp) \
        flex_array_put(fa, nr, (void *)&(src), gfp)

void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr);

#endif /* _FLEX_ARRAY_H */
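
Note: the header above was the entire user-facing API being removed. For
context, a minimal sketch of a typical caller of the old interface —
struct foo, src and nr_items are illustrative names, not taken from this diff:

static int example_flex_array_user(struct foo *src, unsigned int nr_items)
{
        struct flex_array *fa;
        struct foo *p;
        int err;

        fa = flex_array_alloc(sizeof(struct foo), nr_items, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        /* Parts are allocated lazily; preallocating up front means the
         * later flex_array_put() cannot fail with -ENOMEM. */
        err = flex_array_prealloc(fa, 0, nr_items, GFP_KERNEL);
        if (err) {
                flex_array_free(fa);
                return err;
        }

        err = flex_array_put(fa, 0, src, GFP_KERNEL);   /* copies *src in */
        if (!err) {
                p = flex_array_get(fa, 0);              /* pointer to slot 0 */
                pr_debug("element 0 at %p\n", p);
        }

        flex_array_free(fa);
        return err;
}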

include/linux/generic-radix-tree.h (new file)

@@ -0,0 +1,231 @@
#ifndef _LINUX_GENERIC_RADIX_TREE_H
#define _LINUX_GENERIC_RADIX_TREE_H

/**
 * DOC: Generic radix trees/sparse arrays
 *
 * Very simple and minimalistic, supporting arbitrary size entries up to
 * PAGE_SIZE.
 *
 * A genradix is defined with the type it will store, like so:
 *
 * static GENRADIX(struct foo) foo_genradix;
 *
 * The main operations are:
 *
 * - genradix_init(radix) - initialize an empty genradix
 *
 * - genradix_free(radix) - free all memory owned by the genradix and
 *   reinitialize it
 *
 * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning
 *   NULL if that entry does not exist
 *
 * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry,
 *   allocating it if necessary
 *
 * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix
 *
 * The radix tree allocates one page of entries at a time, so entries may
 * exist that were never explicitly allocated - they will be initialized to
 * all zeroes.
 *
 * Internally, a genradix is just a radix tree of pages, and indexing works in
 * terms of byte offsets: the wrappers in this header file use sizeof on the
 * type the radix contains to calculate a byte offset from the index - see
 * __idx_to_offset().
 */

#include <asm/page.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/log2.h>

struct genradix_root;

struct __genradix {
        struct genradix_root __rcu *root;
};

/*
 * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE:
 */

#define __GENRADIX_INITIALIZER                                  \
        {                                                       \
                .tree = {                                       \
                        .root = NULL,                           \
                }                                               \
        }

/*
 * We use a 0 size array to stash the type we're storing without taking any
 * space at runtime - then the various accessor macros can use typeof() to get
 * to it for casts/sizeof - we also force the alignment so that storing a type
 * with a ridiculous alignment doesn't blow up the alignment or size of the
 * genradix.
 */

#define GENRADIX(_type)                                         \
struct {                                                        \
        struct __genradix       tree;                           \
        _type                   type[0] __aligned(1);           \
}

#define DEFINE_GENRADIX(_name, _type)                           \
        GENRADIX(_type) _name = __GENRADIX_INITIALIZER

/**
 * genradix_init - initialize a genradix
 * @_radix: genradix to initialize
 *
 * Does not fail
 */
#define genradix_init(_radix)                                   \
do {                                                            \
        *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER;   \
} while (0)

void __genradix_free(struct __genradix *);

/**
 * genradix_free - free all memory owned by a genradix
 * @_radix: the genradix to free
 *
 * After freeing, @_radix will be reinitialized and empty
 */
#define genradix_free(_radix)   __genradix_free(&(_radix)->tree)

static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
{
        if (__builtin_constant_p(obj_size))
                BUILD_BUG_ON(obj_size > PAGE_SIZE);
        else
                BUG_ON(obj_size > PAGE_SIZE);

        if (!is_power_of_2(obj_size)) {
                size_t objs_per_page = PAGE_SIZE / obj_size;

                return (idx / objs_per_page) * PAGE_SIZE +
                        (idx % objs_per_page) * obj_size;
        } else {
                return idx * obj_size;
        }
}

#define __genradix_cast(_radix)         (typeof((_radix)->type[0]) *)
#define __genradix_obj_size(_radix)     sizeof((_radix)->type[0])
#define __genradix_idx_to_offset(_radix, _idx)                  \
        __idx_to_offset(_idx, __genradix_obj_size(_radix))

void *__genradix_ptr(struct __genradix *, size_t);

/**
 * genradix_ptr - get a pointer to a genradix entry
 * @_radix: genradix to access
 * @_idx: index to fetch
 *
 * Returns a pointer to entry at @_idx, or NULL if that entry does not exist.
 */
#define genradix_ptr(_radix, _idx)                              \
        (__genradix_cast(_radix)                                \
         __genradix_ptr(&(_radix)->tree,                        \
                        __genradix_idx_to_offset(_radix, _idx)))

void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t);

/**
 * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it
 *                      if necessary
 * @_radix: genradix to access
 * @_idx: index to fetch
 * @_gfp: gfp mask
 *
 * Returns a pointer to entry at @_idx, or NULL on allocation failure
 */
#define genradix_ptr_alloc(_radix, _idx, _gfp)                  \
        (__genradix_cast(_radix)                                \
         __genradix_ptr_alloc(&(_radix)->tree,                  \
                        __genradix_idx_to_offset(_radix, _idx), \
                        _gfp))

struct genradix_iter {
        size_t                  offset;
        size_t                  pos;
};

/**
 * genradix_iter_init - initialize a genradix_iter
 * @_radix: genradix that will be iterated over
 * @_idx: index to start iterating from
 */
#define genradix_iter_init(_radix, _idx)                        \
        ((struct genradix_iter) {                               \
                .pos    = (_idx),                               \
                .offset = __genradix_idx_to_offset((_radix), (_idx)),\
        })

void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);

/**
 * genradix_iter_peek - get first entry at or above iterator's current
 *                      position
 * @_iter: a genradix_iter
 * @_radix: genradix being iterated over
 *
 * If no more entries exist at or above @_iter's current position, returns NULL
 */
#define genradix_iter_peek(_iter, _radix)                       \
        (__genradix_cast(_radix)                                \
         __genradix_iter_peek(_iter, &(_radix)->tree,           \
                              PAGE_SIZE / __genradix_obj_size(_radix)))

static inline void __genradix_iter_advance(struct genradix_iter *iter,
                                           size_t obj_size)
{
        iter->offset += obj_size;

        if (!is_power_of_2(obj_size) &&
            (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE)
                iter->offset = round_up(iter->offset, PAGE_SIZE);

        iter->pos++;
}

#define genradix_iter_advance(_iter, _radix)                    \
        __genradix_iter_advance(_iter, __genradix_obj_size(_radix))

#define genradix_for_each_from(_radix, _iter, _p, _start)       \
        for (_iter = genradix_iter_init(_radix, _start);        \
             (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
             genradix_iter_advance(&_iter, _radix))

/**
 * genradix_for_each - iterate over each entry in a genradix
 * @_radix: genradix to iterate over
 * @_iter: a genradix_iter to track current position
 * @_p: pointer to genradix entry type
 *
 * On every iteration, @_p will point to the current entry, and @_iter.pos
 * will be the current entry's index.
 */
#define genradix_for_each(_radix, _iter, _p)                    \
        genradix_for_each_from(_radix, _iter, _p, 0)

int __genradix_prealloc(struct __genradix *, size_t, gfp_t);

/**
 * genradix_prealloc - preallocate entries in a generic radix tree
 * @_radix: genradix to preallocate
 * @_nr: number of entries to preallocate
 * @_gfp: gfp mask
 *
 * Returns 0 on success, -ENOMEM on failure
 */
#define genradix_prealloc(_radix, _nr, _gfp)                    \
        __genradix_prealloc(&(_radix)->tree,                    \
                        __genradix_idx_to_offset(_radix, _nr + 1),\
                        _gfp)

#endif /* _LINUX_GENERIC_RADIX_TREE_H */
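
Note: the DOC block above doubles as a usage guide. The non-power-of-2 path in
__idx_to_offset() is what keeps entries from straddling pages: with a 24-byte
type and 4096-byte pages, 170 objects fit per page, so index 200 maps to offset
1 * 4096 + 30 * 24 = 4816 rather than 200 * 24. A minimal usage sketch of the
new API (struct foo, index 123 and the pr_info() are illustrative, not part of
this commit):

static GENRADIX(struct foo) foo_genradix;

static int example_genradix_user(gfp_t gfp)
{
        struct genradix_iter iter;
        struct foo *p;

        genradix_init(&foo_genradix);

        /* Allocates the backing page for index 123 if necessary. */
        p = genradix_ptr_alloc(&foo_genradix, 123, gfp);
        if (!p)
                return -ENOMEM;

        /* Visits every allocated entry; iter.pos is the entry's index. */
        genradix_for_each(&foo_genradix, iter, p)
                pr_info("entry %zu at %p\n", iter.pos, p);

        genradix_free(&foo_genradix);
        return 0;
}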

include/linux/hmm.h

@@ -468,7 +468,7 @@ struct hmm_devmem_ops {
          * Note that mmap semaphore is held in read mode at least when this
          * callback occurs, hence the vma is valid upon callback entry.
          */
-        int (*fault)(struct hmm_devmem *devmem,
+        vm_fault_t (*fault)(struct hmm_devmem *devmem,
                        struct vm_area_struct *vma,
                        unsigned long addr,
                        const struct page *page,
@@ -511,7 +511,7 @@ struct hmm_devmem_ops {
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 */
-typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
+typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
                                unsigned long addr,
                                const struct page *page,
                                unsigned int flags,
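
Note: the only change in both hunks is the return type. vm_fault_t holds
VM_FAULT_* bits rather than a -errno value, so mixing the two conventions
becomes a type error instead of a silent runtime bug. A hedged sketch of a
callback under the new signature — my_device_make_resident() is an invented
helper, and the trailing pmd_t parameter is completed from the surrounding
header rather than shown in the hunk:

static vm_fault_t my_devmem_fault(struct hmm_devmem *devmem,
                                  struct vm_area_struct *vma,
                                  unsigned long addr,
                                  const struct page *page,
                                  unsigned int flags,
                                  pmd_t *pmdp)
{
        /* Migrate the device page back so the CPU can touch it. */
        if (my_device_make_resident(devmem, vma, addr, page))
                return VM_FAULT_SIGBUS; /* a VM_FAULT_* bit, not -EFAULT */

        return 0;
}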

include/linux/memblock.h

@@ -108,9 +108,6 @@ void memblock_discard(void);
 #define memblock_dbg(fmt, ...) \
         if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

-phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
-                                        phys_addr_t start, phys_addr_t end,
-                                        int nid, enum memblock_flags flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
                                    phys_addr_t size, phys_addr_t align);
 void memblock_allow_resize(void);
@@ -127,7 +124,6 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
 int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-enum memblock_flags choose_memblock_flags(void);

 unsigned long memblock_free_all(void);
 void reset_node_managed_pages(pg_data_t *pgdat);
@@ -277,18 +273,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
         for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
                                nid, flags, p_start, p_end, p_nid)

-static inline void memblock_set_region_flags(struct memblock_region *r,
-                                             enum memblock_flags flags)
-{
-        r->flags |= flags;
-}
-
-static inline void memblock_clear_region_flags(struct memblock_region *r,
-                                               enum memblock_flags flags)
-{
-        r->flags &= ~flags;
-}

 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_set_node(phys_addr_t base, phys_addr_t size,
                       struct memblock_type *type, int nid);
@@ -325,17 +309,20 @@ static inline int memblock_get_region_node(const struct memblock_region *r)
 #define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
 #endif

 phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
+phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
+                                      phys_addr_t start, phys_addr_t end);
 phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align);

+static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+                                              phys_addr_t align)
+{
+        return memblock_phys_alloc_range(size, align, 0,
+                                         MEMBLOCK_ALLOC_ACCESSIBLE);
+}

 void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                  phys_addr_t min_addr, phys_addr_t max_addr,
                                  int nid);
-void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
-                                     phys_addr_t min_addr, phys_addr_t max_addr,
-                                     int nid);
 void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                              phys_addr_t min_addr, phys_addr_t max_addr,
                              int nid);
@@ -362,36 +349,12 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
                                           MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }

-static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
-                                                   phys_addr_t align)
-{
-        return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
-                                              MEMBLOCK_ALLOC_ACCESSIBLE,
-                                              NUMA_NO_NODE);
-}

 static inline void * __init memblock_alloc_low(phys_addr_t size,
                                                phys_addr_t align)
 {
         return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                       ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
 }

-static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
-                                                        phys_addr_t align)
-{
-        return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
-                                              ARCH_LOW_ADDRESS_LIMIT,
-                                              NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
-                                                         phys_addr_t align,
-                                                         phys_addr_t min_addr)
-{
-        return memblock_alloc_try_nid_nopanic(size, align, min_addr,
-                                              MEMBLOCK_ALLOC_ACCESSIBLE,
-                                              NUMA_NO_NODE);
-}

 static inline void * __init memblock_alloc_node(phys_addr_t size,
                                                 phys_addr_t align, int nid)
@@ -400,14 +363,6 @@ static inline void * __init memblock_alloc_node(phys_addr_t size,
                                         MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }

-static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
-                                                        int nid)
-{
-        return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
-                                              MEMBLOCK_LOW_LIMIT,
-                                              MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}

 static inline void __init memblock_free_early(phys_addr_t base,
                                               phys_addr_t size)
 {
@@ -443,16 +398,6 @@ static inline bool memblock_bottom_up(void)
         return memblock.bottom_up;
 }

-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
-                                        phys_addr_t start, phys_addr_t end,
-                                        enum memblock_flags flags);
-phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
-                                    phys_addr_t align, phys_addr_t max_addr,
-                                    int nid, enum memblock_flags flags);
-phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
-                                phys_addr_t max_addr);
-phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
-                                  phys_addr_t max_addr);
 phys_addr_t memblock_phys_mem_size(void);
 phys_addr_t memblock_reserved_size(void);
 phys_addr_t memblock_mem_size(unsigned long limit_pfn);
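
Note: the "add checks for the return value of memblock_alloc*()" commits in
the merge log above are the other half of this API change: with the _nopanic
variants gone and memblock_alloc_try_nid() no longer panicking internally,
failure handling moves to the call sites. The pattern those treewide patches
introduce looks like this (alloc_example_table() is an invented wrapper for
illustration):

static void * __init alloc_example_table(size_t size)
{
        void *ptr = memblock_alloc(size, SMP_CACHE_BYTES);

        /* memblock_alloc() now returns NULL on failure instead of
         * panicking, so the caller decides how fatal that is. */
        if (!ptr)
                panic("%s: Failed to allocate %zu bytes\n", __func__, size);

        return ptr;     /* already zeroed; no memset(0) needed */
}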

include/linux/mm.h

@@ -26,6 +26,7 @@
 #include <linux/page_ref.h>
 #include <linux/memremap.h>
 #include <linux/overflow.h>
+#include <linux/sizes.h>

 struct mempolicy;
 struct anon_vma;
@@ -2402,8 +2403,7 @@ int __must_check write_one_page(struct page *page);
 void task_dirty_inc(struct task_struct *tsk);

 /* readahead.c */
-#define VM_MAX_READAHEAD        128     /* kbytes */
-#define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
+#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read);
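
Note: on a 4 KiB-page system the new macro preserves the old default exactly:
VM_READAHEAD_PAGES = SZ_128K / PAGE_SIZE = 32 pages = 128 kbytes. Expressing
the limit in pages rather than a fixed kbyte count keeps it meaningful when
PAGE_SIZE differs.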

include/linux/poison.h

@@ -83,9 +83,6 @@
 #define MUTEX_DEBUG_FREE        0x22
 #define MUTEX_POISON_WW_CTX     ((void *) 0x500 + POISON_POINTER_DELTA)

-/********** lib/flex_array.c **********/
-#define FLEX_ARRAY_FREE 0x6c    /* for use-after-free poisoning */

 /********** security/ **********/
 #define KEY_DESTROY             0xbd