Merge tag 'mm-nonmm-stable-2022-08-06-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc updates from Andrew Morton:
 "Updates to various subsystems which I help look after. lib, ocfs2,
  fatfs, autofs, squashfs, procfs, etc.

  A relatively small amount of material this time"

* tag 'mm-nonmm-stable-2022-08-06-2' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (72 commits)
  scripts/gdb: ensure the absolute path is generated on initial source
  MAINTAINERS: kunit: add David Gow as a maintainer of KUnit
  mailmap: add linux.dev alias for Brendan Higgins
  mailmap: update Kirill's email
  profile: setup_profiling_timer() is moslty not implemented
  ocfs2: fix a typo in a comment
  ocfs2: use the bitmap API to simplify code
  ocfs2: remove some useless functions
  lib/mpi: fix typo 'the the' in comment
  proc: add some (hopefully) insightful comments
  bdi: remove enum wb_congested_state
  kernel/hung_task: fix address space of proc_dohung_task_timeout_secs
  lib/lzo/lzo1x_compress.c: replace ternary operator with min() and min_t()
  squashfs: support reading fragments in readahead call
  squashfs: implement readahead
  squashfs: always build "file direct" version of page actor
  Revert "squashfs: provide backing_dev_info in order to disable read-ahead"
  fs/ocfs2: Fix spelling typo in comment
  ia64: old_rr4 added under CONFIG_HUGETLB_PAGE
  proc: fix test for "vsyscall=xonly" boot option
  ...
commit eb5699ba31
102 changed files with 1313 additions and 724 deletions
include/linux/backing-dev-defs.h
@@ -28,11 +28,6 @@ enum wb_state {
 	WB_start_all,		/* nr_pages == 0 (all) work pending */
 };
 
-enum wb_congested_state {
-	WB_async_congested,	/* The async (write) queue is getting full */
-	WB_sync_congested,	/* The sync queue is getting full */
-};
-
 enum wb_stat_item {
 	WB_RECLAIMABLE,
 	WB_WRITEBACK,
@@ -122,8 +117,6 @@ struct bdi_writeback {
 	atomic_t writeback_inodes;	/* number of inodes under writeback */
 	struct percpu_counter stat[NR_WB_STAT_ITEMS];
 
-	unsigned long congested;	/* WB_[a]sync_congested flags */
-
 	unsigned long bw_time_stamp;	/* last time write bw is updated */
 	unsigned long dirtied_stamp;
 	unsigned long written_stamp;	/* pages written at bw_time_stamp */
include/linux/compiler-gcc.h
@@ -66,17 +66,6 @@
 		__builtin_unreachable();	\
 	} while (0)
 
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
-
 #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP)
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
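The deleted asm_volatile_goto() quirk appended an empty asm statement as a compiler barrier after every "asm goto" to dodge GCC bug 58670 on old compilers. For context, a minimal, hypothetical sketch of the underlying construct (x86 assembly assumed; the function name is illustrative, not from this patch):

static inline bool asm_goto_demo(void)
{
	/* "asm goto" may transfer control to a C label listed after the
	 * final colon; it is implicitly volatile. The removed macro wrapped
	 * it as: do { asm goto(x); asm (""); } while (0). */
	asm goto("jmp %l[taken]" : /* no outputs */ : : : taken);
	return false;
taken:
	return true;
}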
include/linux/cpumask.h
@@ -116,85 +116,6 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
 	return cpu;
 }
 
-#if NR_CPUS == 1
-/* Uniprocessor.  Assume all masks are "1". */
-static inline unsigned int cpumask_first(const struct cpumask *srcp)
-{
-	return 0;
-}
-
-static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
-{
-	return 0;
-}
-
-static inline unsigned int cpumask_first_and(const struct cpumask *srcp1,
-					     const struct cpumask *srcp2)
-{
-	return 0;
-}
-
-static inline unsigned int cpumask_last(const struct cpumask *srcp)
-{
-	return 0;
-}
-
-/* Valid inputs for n are -1 and 0. */
-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
-	return n+1;
-}
-
-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
-{
-	return n+1;
-}
-
-static inline unsigned int cpumask_next_and(int n,
-					    const struct cpumask *srcp,
-					    const struct cpumask *andp)
-{
-	return n+1;
-}
-
-static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
-					     int start, bool wrap)
-{
-	/* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
-	return (wrap && n == 0);
-}
-
-/* cpu must be a valid cpu, ie 0, so there's no other choice. */
-static inline unsigned int cpumask_any_but(const struct cpumask *mask,
-					   unsigned int cpu)
-{
-	return 1;
-}
-
-static inline unsigned int cpumask_local_spread(unsigned int i, int node)
-{
-	return 0;
-}
-
-static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
-					     const struct cpumask *src2p) {
-	return cpumask_first_and(src1p, src2p);
-}
-
-static inline int cpumask_any_distribute(const struct cpumask *srcp)
-{
-	return cpumask_first(srcp);
-}
-
-#define for_each_cpu(cpu, mask)			\
-	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_not(cpu, mask)		\
-	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
-#define for_each_cpu_wrap(cpu, mask, start)	\
-	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
-#define for_each_cpu_and(cpu, mask1, mask2)	\
-	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask1, (void)mask2)
-#else
 /**
  * cpumask_first - get the first cpu in a cpumask
  * @srcp: the cpumask pointer
@@ -260,10 +181,29 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int __pure cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int __pure cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+
+#if NR_CPUS == 1
+/* Uniprocessor: there is only one valid CPU */
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
+{
+	return 0;
+}
+
+static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
+					     const struct cpumask *src2p) {
+	return cpumask_first_and(src1p, src2p);
+}
+
+static inline int cpumask_any_distribute(const struct cpumask *srcp)
+{
+	return cpumask_first(srcp);
+}
+#else
 unsigned int cpumask_local_spread(unsigned int i, int node);
 int cpumask_any_and_distribute(const struct cpumask *src1p,
 			       const struct cpumask *src2p);
 int cpumask_any_distribute(const struct cpumask *srcp);
+#endif /* NR_CPUS */
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
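The uniprocessor stubs removed above assumed every mask contains exactly CPU 0, so an empty mask was misreported: cpumask_first() returned 0 instead of a value >= nr_cpu_ids. A minimal sketch of the semantics the shared implementation now provides even when NR_CPUS == 1 (kernel context assumed; the function name is illustrative):

#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int empty_mask_demo(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))	/* all bits clear */
		return -ENOMEM;

	/* An empty mask has no first CPU, so the result must be
	 * >= nr_cpu_ids; the removed NR_CPUS == 1 stub returned 0 here. */
	WARN_ON(cpumask_first(mask) < nr_cpu_ids);

	free_cpumask_var(mask);
	return 0;
}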
@@ -289,7 +229,7 @@ int cpumask_any_distribute(const struct cpumask *srcp);
 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
 
-extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
 
 /**
  * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
@@ -324,7 +264,6 @@ extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool
 	for ((cpu) = -1;				\
 		(cpu) = cpumask_next_and((cpu), (mask1), (mask2)),	\
 		(cpu) < nr_cpu_ids;)
-#endif /* SMP */
 
 #define CPU_BITS_NONE						\
 {								\
@@ -811,9 +750,16 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
 /* First bits of cpu_bit_bitmap are in fact unset. */
 #define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
 
+#if NR_CPUS == 1
+/* Uniprocessor: the possible/online/present masks are always "1" */
+#define for_each_possible_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_online_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#define for_each_present_cpu(cpu)	for ((cpu) = 0; (cpu) < 1; (cpu)++)
+#else
 #define for_each_possible_cpu(cpu) for_each_cpu((cpu), cpu_possible_mask)
 #define for_each_online_cpu(cpu)   for_each_cpu((cpu), cpu_online_mask)
 #define for_each_present_cpu(cpu)  for_each_cpu((cpu), cpu_present_mask)
+#endif
 
 /* Wrappers for arch boot code to manipulate normally-constant masks */
 void init_cpu_present(const struct cpumask *src);
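With the #if/#else above, the for_each_*_cpu iterators behave identically on UP and SMP builds. A typical use, summing a per-CPU counter (a sketch; the counter and function names are hypothetical):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u64, pkt_count);	/* hypothetical per-CPU counter */

static u64 total_pkt_count(void)
{
	unsigned int cpu;
	u64 sum = 0;

	/* On NR_CPUS == 1 this degenerates to a single pass over CPU 0. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(pkt_count, cpu);
	return sum;
}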
include/linux/kernel_read_file.h
@@ -35,21 +35,21 @@ static inline const char *kernel_read_file_id_str(enum kernel_read_file_id id)
 	return kernel_read_file_str[id];
 }
 
-int kernel_read_file(struct file *file, loff_t offset,
-		     void **buf, size_t buf_size,
-		     size_t *file_size,
-		     enum kernel_read_file_id id);
-int kernel_read_file_from_path(const char *path, loff_t offset,
-			       void **buf, size_t buf_size,
-			       size_t *file_size,
-			       enum kernel_read_file_id id);
-int kernel_read_file_from_path_initns(const char *path, loff_t offset,
-				      void **buf, size_t buf_size,
-				      size_t *file_size,
-				      enum kernel_read_file_id id);
-int kernel_read_file_from_fd(int fd, loff_t offset,
-			     void **buf, size_t buf_size,
-			     size_t *file_size,
-			     enum kernel_read_file_id id);
+ssize_t kernel_read_file(struct file *file, loff_t offset,
+			 void **buf, size_t buf_size,
+			 size_t *file_size,
+			 enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path(const char *path, loff_t offset,
+				   void **buf, size_t buf_size,
+				   size_t *file_size,
+				   enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_path_initns(const char *path, loff_t offset,
+					  void **buf, size_t buf_size,
+					  size_t *file_size,
+					  enum kernel_read_file_id id);
+ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
+				 void **buf, size_t buf_size,
+				 size_t *file_size,
+				 enum kernel_read_file_id id);
 
 #endif /* _LINUX_KERNEL_READ_FILE_H */
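Returning ssize_t lets these helpers report how many bytes were actually read instead of truncating the count to int; the SSIZE_MAX definition added to limits.h below pairs with this. A usage sketch — the path and function name are hypothetical, and error handling is abbreviated:

#include <linux/kernel_read_file.h>
#include <linux/limits.h>
#include <linux/vmalloc.h>

static int load_blob_demo(void)
{
	void *buf = NULL;	/* NULL: the helper allocates the buffer */
	size_t file_size = 0;
	ssize_t ret;

	ret = kernel_read_file_from_path("/lib/firmware/example.bin", 0,
					 &buf, SSIZE_MAX, &file_size,
					 READING_FIRMWARE);
	if (ret < 0)
		return ret;	/* negative errno */

	/* ret is the number of bytes read; buf holds the file contents */
	vfree(buf);
	return 0;
}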
include/linux/kfifo.h
@@ -688,7 +688,7 @@ __kfifo_uint_must_check_helper( \
  * writer, you don't need extra locking to use these macro.
  */
 #define	kfifo_to_user(fifo, to, len, copied) \
-__kfifo_uint_must_check_helper( \
+__kfifo_int_must_check_helper( \
 ({ \
 	typeof((fifo) + 1) __tmp = (fifo); \
 	void __user *__to = (to); \
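kfifo_to_user() yields an int error code (0 or -EFAULT) rather than an unsigned count, which is why the int variant of the must-check helper is the correct wrapper. An illustrative read() method (a sketch; all names are hypothetical):

#include <linux/fs.h>
#include <linux/kfifo.h>

static DEFINE_KFIFO(demo_fifo, char, 128);	/* hypothetical fifo */

static ssize_t demo_read(struct file *file, char __user *ubuf,
			 size_t len, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	/* Returns 0 or -EFAULT; 'copied' reports bytes actually copied. */
	ret = kfifo_to_user(&demo_fifo, ubuf, len, &copied);
	if (ret)
		return ret;
	return copied;
}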
include/linux/limits.h
@@ -7,6 +7,7 @@
 #include <vdso/limits.h>
 
 #define SIZE_MAX	(~(size_t)0)
+#define SSIZE_MAX	((ssize_t)(SIZE_MAX >> 1))
 #define PHYS_ADDR_MAX	(~(phys_addr_t)0)
 
 #define U8_MAX		((u8)~0U)
include/linux/net.h
@@ -307,8 +307,6 @@ do { \
 
 #define net_get_random_once(buf, nbytes)		\
 	get_random_once((buf), (nbytes))
-#define net_get_random_once_wait(buf, nbytes)		\
-	get_random_once_wait((buf), (nbytes))
 
 /*
  *	E.g. XFS meta- & log-data is in slab pages, or bcache meta
include/linux/once.h
@@ -54,7 +54,5 @@ void __do_once_done(bool *done, struct static_key_true *once_key,
 
 #define get_random_once(buf, nbytes)					     \
 	DO_ONCE(get_random_bytes, (buf), (nbytes))
-#define get_random_once_wait(buf, nbytes)				     \
-	DO_ONCE(get_random_bytes_wait, (buf), (nbytes))			     \
 
 #endif /* _LINUX_ONCE_H */
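With net_get_random_once_wait() gone above, get_random_once_wait() is dropped here as well, apparently for lack of users. The surviving get_random_once() fills a static buffer with random bytes exactly once across all callers; a common pattern (a sketch; the secret and function names are hypothetical):

#include <linux/once.h>
#include <linux/random.h>
#include <linux/types.h>

static u32 hash_secret;	/* hypothetical per-subsystem secret */

static u32 demo_hash(u32 val)
{
	/* The first caller seeds hash_secret; later calls are no-ops. */
	get_random_once(&hash_secret, sizeof(hash_secret));
	return val ^ hash_secret;
}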
include/linux/rbtree.h
@@ -17,9 +17,9 @@
 #ifndef	_LINUX_RBTREE_H
 #define	_LINUX_RBTREE_H
 
+#include <linux/container_of.h>
 #include <linux/rbtree_types.h>
 
-#include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/rcupdate.h>
 