Linux 5.10-rc5
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl+69egeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGTSYH/ifRBlaxy5UiHFc0
2zdR7pkjWrYfDTTT3sazIAhdlzzcfnkUqgFxOP45F4ZIqeTzunH3sUY+5UlT9IX7
liUgnLxQ/1R9Gx8kPGQfu+tLCey78xVFydGsqJoW9sPRw2R+apMdGGa/lOrk+OXz
DXIN+dDnGFqwCCNJpK+rxQQhFf++IPpSI8z6Y23moOFhsDZrEziHuVFy2FGyRM6z
prZ/us/tcobE8ptCk1RmOxLoJ1DR6UxpA2vLimTE+JD8siOsSWPbjE0KudnWCnd5
BLqIjrsPJbSxyuzzK3v9dnO5wMv7tMDuMIuYM/MQTXDttNwtsqt/aP6gdnUCym7N
5eHEj5g=
=MuO1
-----END PGP SIGNATURE-----

Merge 5.10-rc5 into staging-testing

We want the staging/IIO fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 03c1136af5

1186 changed files with 13933 additions and 6427 deletions
include/linux/blk-mq.h
@@ -235,6 +235,8 @@ enum hctx_type {
  * @flags:	   Zero or more BLK_MQ_F_* flags.
  * @driver_data:   Pointer to data owned by the block driver that created this
  *		   tag set.
+ * @active_queues_shared_sbitmap:
+ *		   number of active request queues per tag set.
  * @__bitmap_tags: A shared tags sbitmap, used over all hctx's
  * @__breserved_tags:
  *		   A shared reserved tags sbitmap, used over all hctx's
include/linux/can/skb.h
@@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
  */
 static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
 {
-	if (skb_shared(skb)) {
-		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+	struct sk_buff *nskb;
 
-		if (likely(nskb)) {
-			can_skb_set_owner(nskb, skb->sk);
-			consume_skb(skb);
-			return nskb;
-		} else {
-			kfree_skb(skb);
-			return NULL;
-		}
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (unlikely(!nskb)) {
+		kfree_skb(skb);
+		return NULL;
 	}
 
-	/* we can assume to have an unshared skb with proper owner */
-	return skb;
+	can_skb_set_owner(nskb, skb->sk);
+	consume_skb(skb);
+	return nskb;
 }
 
 #endif /* !_CAN_SKB_H */
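The rewritten helper always clones, so callers no longer depend on correctly guessing whether the skb is shared. A minimal sketch of the calling pattern in a driver transmit path (demo_xmit and its surroundings are illustrative, not part of this commit):

    /* Illustration only: after the call, 'skb' is either an unshared skb
     * that is safe to keep around for TX echo, or NULL with the original
     * skb already freed by the helper. */
    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
    	skb = can_create_echo_skb(skb);
    	if (!skb)
    		return NETDEV_TX_OK;	/* nothing left to free here */

    	/* ... hand skb to the hardware and keep it for the echo ... */
    	return NETDEV_TX_OK;
    }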
include/linux/compiler-clang.h
@@ -8,8 +8,10 @@
 		     + __clang_patchlevel__)
 
 #if CLANG_VERSION < 100001
+#ifndef __BPF_TRACING__
 # error Sorry, your version of Clang is too old - please use 10.0.1 or newer.
 #endif
+#endif
 
 /* Compiler specific definitions for Clang compiler */
@@ -60,12 +62,6 @@
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
 
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like ICC.
- */
-#define barrier() __asm__ __volatile__("" : : : "memory")
-
 #if __has_feature(shadow_call_stack)
 # define __noscs __attribute__((__no_sanitize__("shadow-call-stack")))
 #endif
include/linux/compiler-gcc.h
@@ -15,25 +15,6 @@
 # error Sorry, your version of GCC is too old - please use 4.9 or newer.
 #endif
 
-/* Optimization barrier */
-
-/* The "volatile" is due to gcc bugs */
-#define barrier() __asm__ __volatile__("": : :"memory")
-/*
- * This version is i.e. to prevent dead stores elimination on @ptr
- * where gcc and llvm may behave differently when otherwise using
- * normal barrier(): while gcc behavior gets along with a normal
- * barrier(), llvm needs an explicit input variable to be assumed
- * clobbered. The issue is as follows: while the inline asm might
- * access any memory it wants, the compiler could have fit all of
- * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proved that the inline asm wasn't touching any of
- * it. This version works well with both compilers, i.e. we're telling
- * the compiler that the inline asm absolutely may see the contents
- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
- */
-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
-
 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
  * shouldn't recognize the original var, and make assumptions about it.
@@ -175,5 +156,3 @@
 #else
 #define __diag_GCC_8(s)
 #endif
-
-#define __no_fgcse __attribute__((optimize("-fno-gcse")))
include/linux/compiler.h
@@ -80,11 +80,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 
 /* Optimization barrier */
 #ifndef barrier
-# define barrier() __memory_barrier()
+/* The "volatile" is due to gcc bugs */
+# define barrier() __asm__ __volatile__("": : :"memory")
 #endif
 
 #ifndef barrier_data
-# define barrier_data(ptr) barrier()
+/*
+ * This version is i.e. to prevent dead stores elimination on @ptr
+ * where gcc and llvm may behave differently when otherwise using
+ * normal barrier(): while gcc behavior gets along with a normal
+ * barrier(), llvm needs an explicit input variable to be assumed
+ * clobbered. The issue is as follows: while the inline asm might
+ * access any memory it wants, the compiler could have fit all of
+ * @ptr into memory registers instead, and since @ptr never escaped
+ * from that, it proved that the inline asm wasn't touching any of
+ * it. This version works well with both compilers, i.e. we're telling
+ * the compiler that the inline asm absolutely may see the contents
+ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
 #endif
 
 /* workaround for GCC PR82365 if needed */
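For context, barrier_data() exists because llvm can prove a stack buffer dead after its last read and elide a final clearing store; the "r"(ptr) asm input forces the compiler to assume the buffer is still read. A minimal sketch of the classic use case (scrub_key is illustrative; the kernel's real helper for this pattern is memzero_explicit()):

    static void scrub_key(void)
    {
    	char key[32];

    	/* ... use key for some secret computation ... */

    	memset(key, 0, sizeof(key));
    	barrier_data(key);	/* keep the compiler from eliding the memset */
    }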
include/linux/compiler_types.h
@@ -247,10 +247,6 @@ struct ftrace_likely_data {
 #define asm_inline asm
 #endif
 
-#ifndef __no_fgcse
-# define __no_fgcse
-#endif
-
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
include/linux/cpufreq.h
@@ -109,6 +109,12 @@ struct cpufreq_policy {
 	bool			fast_switch_possible;
 	bool			fast_switch_enabled;
 
+	/*
+	 * Set if the CPUFREQ_GOV_STRICT_TARGET flag is set for the current
+	 * governor.
+	 */
+	bool			strict_target;
+
 	/*
 	 * Preferred average time interval between consecutive invocations of
 	 * the driver to set the frequency for this policy. To be set by the
@@ -570,12 +576,20 @@ struct cpufreq_governor {
 					 char *buf);
 	int	(*store_setspeed)	(struct cpufreq_policy *policy,
 					 unsigned int freq);
-	/* For governors which change frequency dynamically by themselves */
-	bool			dynamic_switching;
 	struct list_head	governor_list;
 	struct module		*owner;
+	u8			flags;
 };
 
+/* Governor flags */
+
+/* For governors which change frequency dynamically by themselves */
+#define CPUFREQ_GOV_DYNAMIC_SWITCHING	BIT(0)
+
+/* For governors wanting the target frequency to be set exactly */
+#define CPUFREQ_GOV_STRICT_TARGET	BIT(1)
+
+
 /* Pass a target to the cpufreq driver */
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq);
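With the flags field, governor properties become OR-able bits rather than one bool member per property. A sketch of how a governor would declare both flags (demo_governor is hypothetical; other members are omitted):

    static struct cpufreq_governor demo_governor = {
    	.name	= "demo",
    	/* replaces the old 'bool dynamic_switching' member */
    	.flags	= CPUFREQ_GOV_DYNAMIC_SWITCHING | CPUFREQ_GOV_STRICT_TARGET,
    };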
include/linux/filter.h
@@ -558,21 +558,21 @@ struct sk_filter {
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
 #define __BPF_PROG_RUN(prog, ctx, dfunc)	({			\
-	u32 ret;							\
+	u32 __ret;							\
 	cant_migrate();							\
 	if (static_branch_unlikely(&bpf_stats_enabled_key)) {		\
-		struct bpf_prog_stats *stats;				\
-		u64 start = sched_clock();				\
-		ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
-		stats = this_cpu_ptr(prog->aux->stats);			\
-		u64_stats_update_begin(&stats->syncp);			\
-		stats->cnt++;						\
-		stats->nsecs += sched_clock() - start;			\
-		u64_stats_update_end(&stats->syncp);			\
+		struct bpf_prog_stats *__stats;				\
+		u64 __start = sched_clock();				\
+		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
+		__stats = this_cpu_ptr(prog->aux->stats);		\
+		u64_stats_update_begin(&__stats->syncp);		\
+		__stats->cnt++;						\
+		__stats->nsecs += sched_clock() - __start;		\
+		u64_stats_update_end(&__stats->syncp);			\
 	} else {							\
-		ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
+		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);	\
 	}								\
-	ret; })
+	__ret; })
 
 #define BPF_PROG_RUN(prog, ctx)						\
 	__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)
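The double-underscore renames matter because __BPF_PROG_RUN() expands as a statement expression: locals named ret, stats or start inside it could shadow identically named variables in the calling scope or in expressions passed as macro arguments. A rough sketch of the hazard class being avoided (illustrative, not from this commit):

    u64 start = sched_clock();		/* caller's own 'start' */
    u32 ret = BPF_PROG_RUN(prog, ctx);	/* with plain 'ret'/'start' locals, the
    					 * macro temporaries would shadow these */
    u64 elapsed = sched_clock() - start;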
include/linux/fs.h
@@ -1580,8 +1580,24 @@ extern struct timespec64 current_time(struct inode *inode);
  * Snapshotting support.
  */
 
-void __sb_end_write(struct super_block *sb, int level);
-int __sb_start_write(struct super_block *sb, int level, bool wait);
+/*
+ * These are internal functions, please use sb_start_{write,pagefault,intwrite}
+ * instead.
+ */
+static inline void __sb_end_write(struct super_block *sb, int level)
+{
+	percpu_up_read(sb->s_writers.rw_sem + level-1);
+}
+
+static inline void __sb_start_write(struct super_block *sb, int level)
+{
+	percpu_down_read(sb->s_writers.rw_sem + level - 1);
+}
+
+static inline bool __sb_start_write_trylock(struct super_block *sb, int level)
+{
+	return percpu_down_read_trylock(sb->s_writers.rw_sem + level - 1);
+}
 
 #define __sb_writers_acquired(sb, lev)	\
 	percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_)
@@ -1645,12 +1661,12 @@ static inline void sb_end_intwrite(struct super_block *sb)
  */
 static inline void sb_start_write(struct super_block *sb)
 {
-	__sb_start_write(sb, SB_FREEZE_WRITE, true);
+	__sb_start_write(sb, SB_FREEZE_WRITE);
 }
 
-static inline int sb_start_write_trylock(struct super_block *sb)
+static inline bool sb_start_write_trylock(struct super_block *sb)
 {
-	return __sb_start_write(sb, SB_FREEZE_WRITE, false);
+	return __sb_start_write_trylock(sb, SB_FREEZE_WRITE);
 }
 
 /**
@@ -1674,7 +1690,7 @@ static inline int sb_start_write_trylock(struct super_block *sb)
  */
 static inline void sb_start_pagefault(struct super_block *sb)
 {
-	__sb_start_write(sb, SB_FREEZE_PAGEFAULT, true);
+	__sb_start_write(sb, SB_FREEZE_PAGEFAULT);
 }
 
 /*
@@ -1692,12 +1708,12 @@ static inline void sb_start_pagefault(struct super_block *sb)
  */
 static inline void sb_start_intwrite(struct super_block *sb)
 {
-	__sb_start_write(sb, SB_FREEZE_FS, true);
+	__sb_start_write(sb, SB_FREEZE_FS);
 }
 
-static inline int sb_start_intwrite_trylock(struct super_block *sb)
+static inline bool sb_start_intwrite_trylock(struct super_block *sb)
 {
-	return __sb_start_write(sb, SB_FREEZE_FS, false);
+	return __sb_start_write_trylock(sb, SB_FREEZE_FS);
 }
 
 
@@ -2756,14 +2772,14 @@ static inline void file_start_write(struct file *file)
 {
 	if (!S_ISREG(file_inode(file)->i_mode))
 		return;
-	__sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
+	sb_start_write(file_inode(file)->i_sb);
 }
 
 static inline bool file_start_write_trylock(struct file *file)
 {
 	if (!S_ISREG(file_inode(file)->i_mode))
 		return true;
-	return __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, false);
+	return sb_start_write_trylock(file_inode(file)->i_sb);
 }
 
 static inline void file_end_write(struct file *file)
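The net effect of the fs.h changes is that freeze protection becomes a set of inline wrappers over the superblock's per-level percpu rwsem, with the trylock variants returning bool instead of the old int/"bool wait" convention. The calling pattern is unchanged; a minimal sketch (demo names are illustrative):

    static void demo_modify(struct super_block *sb)
    {
    	sb_start_write(sb);	/* inlines percpu_down_read(); blocks while frozen */
    	/* ... perform the modification ... */
    	sb_end_write(sb);	/* inlines percpu_up_read() */
    }

    static bool demo_try_modify(struct super_block *sb)
    {
    	if (!sb_start_write_trylock(sb))
    		return false;	/* sb is frozen or being frozen */
    	/* ... */
    	sb_end_write(sb);
    	return true;
    }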
include/linux/genhd.h
@@ -315,7 +315,7 @@ static inline int get_disk_ro(struct gendisk *disk)
 extern void disk_block_events(struct gendisk *disk);
 extern void disk_unblock_events(struct gendisk *disk);
 extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
-void set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
+bool set_capacity_revalidate_and_notify(struct gendisk *disk, sector_t size,
 		bool update_bdev);
 
 /* drivers/char/random.c */
include/linux/intel-iommu.h
@@ -798,7 +798,6 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
 extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
 extern int intel_iommu_gfx_mapped;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
include/linux/io_uring.h
@@ -30,7 +30,8 @@ struct io_uring_task {
 	struct percpu_counter	inflight;
 	struct io_identity	__identity;
 	struct io_identity	*identity;
-	bool			in_idle;
+	atomic_t		in_idle;
 	bool			sqpoll;
 };
 
 #if defined(CONFIG_IO_URING)
include/linux/iomap.h
@@ -221,7 +221,7 @@ struct iomap_writeback_ops {
 	 * Optional, allows the file system to discard state on a page where
	 * we failed to submit any I/O.
	 */
-	void (*discard_page)(struct page *page);
+	void (*discard_page)(struct page *page, loff_t fileoff);
 };
 
 struct iomap_writepage_ctx {
include/linux/jbd2.h
@@ -68,6 +68,7 @@ extern void *jbd2_alloc(size_t size, gfp_t flags);
 extern void jbd2_free(void *ptr, size_t size);
 
 #define JBD2_MIN_JOURNAL_BLOCKS 1024
+#define JBD2_MIN_FC_BLOCKS	256
 
 #ifdef __KERNEL__
 
@@ -400,7 +401,7 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
 #define JI_WAIT_DATA (1 << __JI_WAIT_DATA)
 
 /**
- * struct jbd_inode - The jbd_inode type is the structure linking inodes in
+ * struct jbd2_inode - The jbd_inode type is the structure linking inodes in
  * ordered mode present in a transaction so that we can sync them during commit.
  */
 struct jbd2_inode {
@@ -944,8 +945,9 @@ struct journal_s
	/**
	 * @j_fc_off:
	 *
-	 * Number of fast commit blocks currently allocated.
-	 * [j_state_lock].
+	 * Number of fast commit blocks currently allocated. Accessed only
+	 * during fast commit. Currently only process can do fast commit, so
+	 * this field is not protected by any lock.
	 */
	unsigned long		j_fc_off;
 
@@ -988,9 +990,9 @@ struct journal_s
	struct block_device	*j_fs_dev;
 
	/**
-	 * @j_maxlen: Total maximum capacity of the journal region on disk.
+	 * @j_total_len: Total maximum capacity of the journal region on disk.
	 */
-	unsigned int		j_maxlen;
+	unsigned int		j_total_len;
 
	/**
	 * @j_reserved_credits:
@@ -1108,8 +1110,9 @@ struct journal_s
	struct buffer_head	**j_wbuf;
 
	/**
-	 * @j_fc_wbuf: Array of fast commit bhs for
-	 * jbd2_journal_commit_transaction.
+	 * @j_fc_wbuf: Array of fast commit bhs for fast commit. Accessed only
+	 * during a fast commit. Currently only process can do fast commit, so
+	 * this field is not protected by any lock.
	 */
	struct buffer_head	**j_fc_wbuf;
 
@@ -1614,16 +1617,20 @@ extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
 extern int jbd2_cleanup_journal_tail(journal_t *);
 
 /* Fast commit related APIs */
-int jbd2_fc_init(journal_t *journal, int num_fc_blks);
 int jbd2_fc_begin_commit(journal_t *journal, tid_t tid);
 int jbd2_fc_end_commit(journal_t *journal);
-int jbd2_fc_end_commit_fallback(journal_t *journal, tid_t tid);
+int jbd2_fc_end_commit_fallback(journal_t *journal);
 int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out);
 int jbd2_submit_inode_data(struct jbd2_inode *jinode);
 int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode);
 int jbd2_fc_wait_bufs(journal_t *journal, int num_blks);
 int jbd2_fc_release_bufs(journal_t *journal);
+
+static inline int jbd2_journal_get_max_txn_bufs(journal_t *journal)
+{
+	return (journal->j_total_len - journal->j_fc_wbufsize) / 4;
+}
 
 /*
  * is_journal_abort
 *
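As a worked example of the new jbd2_journal_get_max_txn_bufs() bound (the numbers are illustrative, not from this commit): a journal with j_total_len = 32768 blocks and j_fc_wbufsize = 256 reserved fast-commit blocks permits at most (32768 - 256) / 4 = 8128 buffers in a single transaction.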
include/linux/memcontrol.h
@@ -900,12 +900,19 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
 static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
 {
+	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+			  event == MEMCG_SWAP_FAIL;
+
	atomic_long_inc(&memcg->memory_events_local[event]);
-	cgroup_file_notify(&memcg->events_local_file);
+	if (!swap_event)
+		cgroup_file_notify(&memcg->events_local_file);
 
	do {
		atomic_long_inc(&memcg->memory_events[event]);
-		cgroup_file_notify(&memcg->events_file);
+		if (swap_event)
+			cgroup_file_notify(&memcg->swap_events_file);
+		else
+			cgroup_file_notify(&memcg->events_file);
 
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
include/linux/memory_hotplug.h
@@ -281,20 +281,6 @@ static inline bool movable_node_is_enabled(void)
 }
 #endif /* ! CONFIG_MEMORY_HOTPLUG */
 
-#ifdef CONFIG_NUMA
-extern int memory_add_physaddr_to_nid(u64 start);
-extern int phys_to_target_node(u64 start);
-#else
-static inline int memory_add_physaddr_to_nid(u64 start)
-{
-	return 0;
-}
-static inline int phys_to_target_node(u64 start)
-{
-	return 0;
-}
-#endif
-
 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 /*
  * pgdat resizing functions
include/linux/mm.h
@@ -2759,6 +2759,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
	return VM_FAULT_NOPAGE;
 }
 
+#ifndef io_remap_pfn_range
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+				     unsigned long addr, unsigned long pfn,
+				     unsigned long size, pgprot_t prot)
+{
+	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
+}
+#endif
+
 static inline vm_fault_t vmf_error(int err)
 {
	if (err == -ENOMEM)
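Having the generic io_remap_pfn_range() apply pgprot_decrypted() means MMIO mappings can no longer accidentally be mapped as encrypted memory under SEV/SME. A sketch of a typical caller, which needs no change (demo_mmap and demo_base_phys are illustrative):

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	unsigned long pfn = demo_base_phys >> PAGE_SHIFT; /* hypothetical MMIO base */

    	/* prot is now sanitized to 'decrypted' inside the helper */
    	return io_remap_pfn_range(vma, vma->vm_start, pfn,
    				  vma->vm_end - vma->vm_start,
    				  vma->vm_page_prot);
    }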
include/linux/netfilter/nfnetlink.h
@@ -24,6 +24,12 @@ struct nfnl_callback {
	const u_int16_t attr_count;	/* number of nlattr's */
 };
 
+enum nfnl_abort_action {
+	NFNL_ABORT_NONE		= 0,
+	NFNL_ABORT_AUTOLOAD,
+	NFNL_ABORT_VALIDATE,
+};
+
 struct nfnetlink_subsystem {
	const char *name;
	__u8 subsys_id;			/* nfnetlink subsystem ID */
@@ -31,7 +37,8 @@ struct nfnetlink_subsystem {
	const struct nfnl_callback *cb;	/* callback for individual types */
	struct module *owner;
	int (*commit)(struct net *net, struct sk_buff *skb);
-	int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
+	int (*abort)(struct net *net, struct sk_buff *skb,
+		     enum nfnl_abort_action action);
	void (*cleanup)(struct net *net);
	bool (*valid_genid)(struct net *net, u32 genid);
 };
include/linux/netfilter_ipv4.h
@@ -16,7 +16,7 @@ struct ip_rt_info {
	u_int32_t mark;
 };
 
-int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
+int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
 
 struct nf_queue_entry;
include/linux/netfilter_ipv6.h
@@ -42,7 +42,7 @@ struct nf_ipv6_ops {
 #if IS_MODULE(CONFIG_IPV6)
	int (*chk_addr)(struct net *net, const struct in6_addr *addr,
			const struct net_device *dev, int strict);
-	int (*route_me_harder)(struct net *net, struct sk_buff *skb);
+	int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
			     const struct in6_addr *daddr, unsigned int srcprefs,
			     struct in6_addr *saddr);
@@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
 #endif
 }
 
-int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
 
-static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 #if IS_MODULE(CONFIG_IPV6)
	const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
@@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
	if (!v6_ops)
		return -EHOSTUNREACH;
 
-	return v6_ops->route_me_harder(net, skb);
+	return v6_ops->route_me_harder(net, sk, skb);
 #elif IS_BUILTIN(CONFIG_IPV6)
-	return ip6_route_me_harder(net, skb);
+	return ip6_route_me_harder(net, sk, skb);
 #else
	return -EHOSTUNREACH;
 #endif
include/linux/numa.h
@@ -21,13 +21,41 @@
 #endif
 
 #ifdef CONFIG_NUMA
+#include <linux/printk.h>
 #include <asm/sparsemem.h>
+
+/* Generic implementation available */
 int numa_map_to_online_node(int node);
-#else
+
+#ifndef memory_add_physaddr_to_nid
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+	pr_info_once("Unknown online node for memory at 0x%llx, assuming node 0\n",
+			start);
+	return 0;
+}
+#endif
+#ifndef phys_to_target_node
+static inline int phys_to_target_node(u64 start)
+{
+	pr_info_once("Unknown target node for memory at 0x%llx, assuming node 0\n",
+			start);
+	return 0;
+}
+#endif
+#else /* !CONFIG_NUMA */
 static inline int numa_map_to_online_node(int node)
 {
	return NUMA_NO_NODE;
 }
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+	return 0;
+}
+static inline int phys_to_target_node(u64 start)
+{
+	return 0;
+}
 #endif
 
 #endif /* _LINUX_NUMA_H */
include/linux/pagemap.h
@@ -344,9 +344,9 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
 /**
  * find_lock_page - locate, pin and lock a pagecache page
  * @mapping: the address_space to search
- * @offset: the page index
+ * @index: the page index
  *
- * Looks up the page cache entry at @mapping & @offset.  If there is a
+ * Looks up the page cache entry at @mapping & @index.  If there is a
  * page cache page, it is returned locked and with an increased
  * refcount.
  *
@@ -363,9 +363,9 @@ static inline struct page *find_lock_page(struct address_space *mapping,
 /**
  * find_lock_head - Locate, pin and lock a pagecache page.
  * @mapping: The address_space to search.
- * @offset: The page index.
+ * @index: The page index.
  *
- * Looks up the page cache entry at @mapping & @offset.  If there is a
+ * Looks up the page cache entry at @mapping & @index.  If there is a
  * page cache page, its head page is returned locked and with an increased
  * refcount.
  *
@@ -906,6 +906,8 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
+		if (xas_retry(&xas, page))
+			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
include/linux/perf_event.h
@@ -1022,13 +1022,7 @@ struct perf_sample_data {
	struct perf_callchain_entry	*callchain;
	u64				aux_size;
 
-	/*
-	 * regs_user may point to task_pt_regs or to regs_user_copy, depending
-	 * on arch details.
-	 */
	struct perf_regs		regs_user;
-	struct pt_regs			regs_user_copy;
-
	struct perf_regs		regs_intr;
	u64				stack_user_size;
 
@@ -1400,11 +1394,14 @@ perf_event_addr_filters(struct perf_event *event)
 extern void perf_event_addr_filters_sync(struct perf_event *event);
 
 extern int perf_output_begin(struct perf_output_handle *handle,
+			     struct perf_sample_data *data,
			     struct perf_event *event, unsigned int size);
 extern int perf_output_begin_forward(struct perf_output_handle *handle,
-				    struct perf_event *event,
-				    unsigned int size);
+				     struct perf_sample_data *data,
+				     struct perf_event *event,
+				     unsigned int size);
 extern int perf_output_begin_backward(struct perf_output_handle *handle,
+				      struct perf_sample_data *data,
				      struct perf_event *event,
				      unsigned int size);
 
include/linux/perf_regs.h
@@ -20,8 +20,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx);
 int perf_reg_validate(u64 mask);
 u64 perf_reg_abi(struct task_struct *task);
 void perf_get_regs_user(struct perf_regs *regs_user,
-			struct pt_regs *regs,
-			struct pt_regs *regs_user_copy);
+			struct pt_regs *regs);
 #else
 
 #define PERF_REG_EXTENDED_MASK	0
@@ -42,8 +41,7 @@ static inline u64 perf_reg_abi(struct task_struct *task)
 }
 
 static inline void perf_get_regs_user(struct perf_regs *regs_user,
-				      struct pt_regs *regs,
-				      struct pt_regs *regs_user_copy)
+				      struct pt_regs *regs)
 {
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
include/linux/pgtable.h
@@ -1427,10 +1427,6 @@ typedef unsigned int pgtbl_mod_mask;
 
 #endif /* !__ASSEMBLY__ */
 
-#ifndef io_remap_pfn_range
-#define io_remap_pfn_range remap_pfn_range
-#endif
-
 #ifndef has_transparent_hugepage
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define has_transparent_hugepage() 1
include/linux/phy.h
@@ -147,16 +147,8 @@ typedef enum {
	PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;
 
-/**
+/*
  * phy_supported_speeds - return all speeds currently supported by a PHY device
- * @phy: The PHY device to return supported speeds of.
- * @speeds: buffer to store supported speeds in.
- * @size: size of speeds buffer.
- *
- * Description: Returns the number of supported speeds, and fills
- * the speeds buffer with the supported speeds. If speeds buffer is
- * too small to contain all currently supported speeds, will return as
- * many speeds as can fit.
  */
 unsigned int phy_supported_speeds(struct phy_device *phy,
				   unsigned int *speeds,
@@ -1022,14 +1014,9 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
					       regnum, mask, set);
 }
 
-/**
+/*
  * phy_read_mmd - Convenience function for reading a register
  * from an MMD on a given PHY.
- * @phydev: The phy_device struct
- * @devad: The MMD to read from
- * @regnum: The register on the MMD to read
- *
- * Same rules as for phy_read();
  */
 int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
 
|
|||
__ret; \
|
||||
})
|
||||
|
||||
/**
|
||||
/*
|
||||
* __phy_read_mmd - Convenience function for reading a register
|
||||
* from an MMD on a given PHY.
|
||||
* @phydev: The phy_device struct
|
||||
* @devad: The MMD to read from
|
||||
* @regnum: The register on the MMD to read
|
||||
*
|
||||
* Same rules as for __phy_read();
|
||||
*/
|
||||
int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
|
||||
|
||||
/**
|
||||
/*
|
||||
* phy_write_mmd - Convenience function for writing a register
|
||||
* on an MMD on a given PHY.
|
||||
* @phydev: The phy_device struct
|
||||
* @devad: The MMD to write to
|
||||
* @regnum: The register on the MMD to read
|
||||
* @val: value to write to @regnum
|
||||
*
|
||||
* Same rules as for phy_write();
|
||||
*/
|
||||
int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
|
||||
|
||||
/**
|
||||
/*
|
||||
* __phy_write_mmd - Convenience function for writing a register
|
||||
* on an MMD on a given PHY.
|
||||
* @phydev: The phy_device struct
|
||||
* @devad: The MMD to write to
|
||||
* @regnum: The register on the MMD to read
|
||||
* @val: value to write to @regnum
|
||||
*
|
||||
* Same rules as for __phy_write();
|
||||
*/
|
||||
int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
|
||||
|
||||
|
|
|
|||
|
|
include/linux/pm_runtime.h
@@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
 extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
 extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
-extern void pm_runtime_clean_up_links(struct device *dev);
 extern void pm_runtime_get_suppliers(struct device *dev);
 extern void pm_runtime_put_suppliers(struct device *dev);
 extern void pm_runtime_new_link(struct device *dev);
-extern void pm_runtime_drop_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device_link *link);
 
 /**
  * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
@@ -276,11 +275,10 @@ static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
 static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable){}
-static inline void pm_runtime_clean_up_links(struct device *dev) {}
 static inline void pm_runtime_get_suppliers(struct device *dev) {}
 static inline void pm_runtime_put_suppliers(struct device *dev) {}
 static inline void pm_runtime_new_link(struct device *dev) {}
-static inline void pm_runtime_drop_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device_link *link) {}
 
 #endif /* !CONFIG_PM */
 
@@ -388,6 +386,27 @@ static inline int pm_runtime_get_sync(struct device *dev)
	return __pm_runtime_resume(dev, RPM_GET_PUT);
 }
 
+/**
+ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
+ * @dev: Target device.
+ *
+ * Resume @dev synchronously and if that is successful, increment its runtime
+ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
+ * incremented or a negative error code otherwise.
+ */
+static inline int pm_runtime_resume_and_get(struct device *dev)
+{
+	int ret;
+
+	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
+	if (ret < 0) {
+		pm_runtime_put_noidle(dev);
+		return ret;
+	}
+
+	return 0;
+}
+
 /**
  * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
  * @dev: Target device.
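pm_runtime_resume_and_get() closes a long-standing pitfall of pm_runtime_get_sync(), which leaves the usage counter raised even when the resume fails. A sketch of the calling pattern it enables (demo_op is illustrative):

    static int demo_op(struct device *dev)
    {
    	int ret;

    	ret = pm_runtime_resume_and_get(dev);
    	if (ret < 0)
    		return ret;	/* counter already dropped on failure */

    	/* ... talk to the now-resumed hardware ... */

    	pm_runtime_put(dev);
    	return 0;
    }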
include/linux/refcount.h
@@ -147,24 +147,6 @@ static inline unsigned int refcount_read(const refcount_t *r)
	return atomic_read(&r->refs);
 }
 
-/**
- * refcount_add_not_zero - add a value to a refcount unless it is 0
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Will saturate at REFCOUNT_SATURATED and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- *
- * Return: false if the passed refcount is 0, true otherwise
- */
 static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
 {
	int old = refcount_read(r);
@@ -183,11 +165,42 @@ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
	return old;
 }
 
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
 static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 {
	return __refcount_add_not_zero(i, r, NULL);
 }
 
+static inline void __refcount_add(int i, refcount_t *r, int *oldp)
+{
+	int old = atomic_fetch_add_relaxed(i, &r->refs);
+
+	if (oldp)
+		*oldp = old;
+
+	if (unlikely(!old))
+		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+	else if (unlikely(old < 0 || old + i < 0))
+		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
+}
+
 /**
  * refcount_add - add a value to a refcount
  * @i: the value to add to the refcount
@@ -204,24 +217,16 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  * cases, refcount_inc(), or one of its variants, should instead be used to
  * increment a reference count.
  */
-static inline void __refcount_add(int i, refcount_t *r, int *oldp)
-{
-	int old = atomic_fetch_add_relaxed(i, &r->refs);
-
-	if (oldp)
-		*oldp = old;
-
-	if (unlikely(!old))
-		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
-	else if (unlikely(old < 0 || old + i < 0))
-		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
-}
-
 static inline void refcount_add(int i, refcount_t *r)
 {
	__refcount_add(i, r, NULL);
 }
 
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+	return __refcount_add_not_zero(1, r, oldp);
+}
+
 /**
  * refcount_inc_not_zero - increment a refcount unless it is 0
  * @r: the refcount to increment
@@ -235,16 +240,16 @@ static inline void refcount_add(int i, refcount_t *r)
  *
  * Return: true if the increment was successful, false otherwise
  */
-static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
-{
-	return __refcount_add_not_zero(1, r, oldp);
-}
-
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
	return __refcount_inc_not_zero(r, NULL);
 }
 
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+	__refcount_add(1, r, oldp);
+}
+
 /**
  * refcount_inc - increment a refcount
  * @r: the refcount to increment
@@ -257,16 +262,29 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  * Will WARN if the refcount is 0, as this represents a possible use-after-free
  * condition.
  */
-static inline void __refcount_inc(refcount_t *r, int *oldp)
-{
-	__refcount_add(1, r, oldp);
-}
-
 static inline void refcount_inc(refcount_t *r)
 {
	__refcount_inc(r, NULL);
 }
 
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+{
+	int old = atomic_fetch_sub_release(i, &r->refs);
+
+	if (oldp)
+		*oldp = old;
+
+	if (old == i) {
+		smp_acquire__after_ctrl_dep();
+		return true;
+	}
+
+	if (unlikely(old < 0 || old - i < 0))
+		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+	return false;
+}
+
 /**
  * refcount_sub_and_test - subtract from a refcount and test if it is 0
  * @i: amount to subtract from the refcount
@@ -287,29 +305,16 @@ static inline void refcount_inc(refcount_t *r)
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
-static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
-{
-	int old = atomic_fetch_sub_release(i, &r->refs);
-
-	if (oldp)
-		*oldp = old;
-
-	if (old == i) {
-		smp_acquire__after_ctrl_dep();
-		return true;
-	}
-
-	if (unlikely(old < 0 || old - i < 0))
-		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
-
-	return false;
-}
-
 static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
 {
	return __refcount_sub_and_test(i, r, NULL);
 }
 
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
+{
+	return __refcount_sub_and_test(1, r, oldp);
+}
+
 /**
  * refcount_dec_and_test - decrement a refcount and test if it is 0
  * @r: the refcount
@@ -323,16 +328,22 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
  *
  * Return: true if the resulting refcount is 0, false otherwise
  */
-static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
-{
-	return __refcount_sub_and_test(1, r, oldp);
-}
-
 static inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
	return __refcount_dec_and_test(r, NULL);
 }
 
+static inline void __refcount_dec(refcount_t *r, int *oldp)
+{
+	int old = atomic_fetch_sub_release(1, &r->refs);
+
+	if (oldp)
+		*oldp = old;
+
+	if (unlikely(old <= 1))
+		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
+}
+
 /**
  * refcount_dec - decrement a refcount
  * @r: the refcount
@@ -343,17 +354,6 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  * Provides release memory ordering, such that prior loads and stores are done
  * before.
  */
-static inline void __refcount_dec(refcount_t *r, int *oldp)
-{
-	int old = atomic_fetch_sub_release(1, &r->refs);
-
-	if (oldp)
-		*oldp = old;
-
-	if (unlikely(old <= 1))
-		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
-}
-
 static inline void refcount_dec(refcount_t *r)
 {
	__refcount_dec(r, NULL);
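The net effect of the refcount.h reshuffle is organizational: each documented refcount_*() operation is a thin wrapper over a __refcount_*() variant that can also report the pre-operation counter value through *oldp. A sketch of using the extended form (demo_get is illustrative):

    static bool demo_get(refcount_t *ref)
    {
    	int old;

    	if (!__refcount_inc_not_zero(ref, &old))
    		return false;	/* count was already 0: object is dying */

    	/* 'old' holds the pre-increment value, useful for tracing/debug */
    	return true;
    }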
include/linux/sched.h
@@ -552,7 +552,6 @@ struct sched_dl_entity {
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
-	unsigned int			dl_boosted        : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;
@@ -571,6 +570,15 @@ struct sched_dl_entity {
	 * time.
	 */
	struct hrtimer inactive_timer;
+
+#ifdef CONFIG_RT_MUTEXES
+	/*
+	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
+	 * pi_se points to the donor, otherwise points to the dl_se it belongs
+	 * to (the original one/itself).
+	 */
+	struct sched_dl_entity *pi_se;
+#endif
 };
 
 #ifdef CONFIG_UCLAMP_TASK
@@ -770,7 +778,6 @@ struct task_struct {
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
-	unsigned			sched_remote_wakeup:1;
 #ifdef CONFIG_PSI
	unsigned			sched_psi_wake_requeue:1;
 #endif
@@ -780,6 +787,21 @@ struct task_struct {
 
	/* Unserialized, strictly 'current' */
 
+	/*
+	 * This field must not be in the scheduler word above due to wakelist
+	 * queueing no longer being serialized by p->on_cpu. However:
+	 *
+	 * p->XXX = X;			ttwu()
+	 * schedule()			  if (p->on_rq && ..) // false
+	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
+	 *   deactivate_task()		      ttwu_queue_wakelist())
+	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
+	 *
+	 * guarantees all stores of 'current' are visible before
+	 * ->sched_remote_wakeup gets used, so it can be in this word.
+	 */
+	unsigned			sched_remote_wakeup:1;
+
	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
include/linux/seq_file.h
@@ -107,6 +107,7 @@ void seq_pad(struct seq_file *m, char c);
 char *mangle_path(char *s, const char *p, const char *esc);
 int seq_open(struct file *, const struct seq_operations *);
 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
+ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 loff_t seq_lseek(struct file *, loff_t, int);
 int seq_release(struct inode *, struct file *);
 int seq_write(struct seq_file *seq, const void *data, size_t len);
include/linux/spi/spi.h
@@ -734,6 +734,25 @@ static inline struct spi_controller *spi_alloc_slave(struct device *host,
	return __spi_alloc_controller(host, size, true);
 }
 
+struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
+						   unsigned int size,
+						   bool slave);
+
+static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
+							    unsigned int size)
+{
+	return __devm_spi_alloc_controller(dev, size, false);
+}
+
+static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
+							   unsigned int size)
+{
+	if (!IS_ENABLED(CONFIG_SPI_SLAVE))
+		return NULL;
+
+	return __devm_spi_alloc_controller(dev, size, true);
+}
+
 extern int spi_register_controller(struct spi_controller *ctlr);
 extern int devm_spi_register_controller(struct device *dev,
					struct spi_controller *ctlr);
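The devm_ variants tie the controller's lifetime to the underlying struct device, removing the error-path spi_controller_put() calls that drivers frequently got wrong. A sketch of a probe function using them (demo names and struct demo_priv are hypothetical):

    static int demo_spi_probe(struct platform_device *pdev)
    {
    	struct spi_controller *ctlr;

    	/* released automatically when the driver unbinds */
    	ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(struct demo_priv));
    	if (!ctlr)
    		return -ENOMEM;

    	return devm_spi_register_controller(&pdev->dev, ctlr);
    }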
include/linux/swiotlb.h
@@ -5,6 +5,7 @@
 #include <linux/dma-direction.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#include <linux/limits.h>
 
 struct device;
 struct page;
@@ -45,13 +46,9 @@ enum dma_sync_target {
	SYNC_FOR_DEVICE = 1,
 };
 
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys,
-					  size_t mapping_size,
-					  size_t alloc_size,
-					  enum dma_data_direction dir,
-					  unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				      phys_addr_t tlb_addr,