Merge branches 'iommu/fixes', 'arm/exynos', 'arm/renesas', 'arm/smmu', 'arm/mediatek', 'arm/core', 'x86/vt-d' and 'core' into next

Joerg Roedel 2017-02-10 15:13:10 +01:00
1244 changed files with 13520 additions and 8038 deletions

View file

@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
static inline unsigned int blk_queue_zone_size(struct request_queue *q)
static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
/*
* Some commands like WRITE SAME have a payload or data transfer size which
* is different from the size of the request. Any driver that supports such
* commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
* calculate the data transfer size.
*/
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
return rq->special_vec.bv_len;
return blk_rq_bytes(rq);
}
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
static inline unsigned int bdev_zone_size(struct block_device *bdev)
static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q)
return blk_queue_zone_size(q);
return blk_queue_zone_sectors(q);
return 0;
}
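
As a usage sketch (not part of this commit), a driver that supports RQF_SPECIAL_PAYLOAD commands would size its transfer with the new helper rather than blk_rq_bytes(); the mydrv_* names are hypothetical:

/* Sketch: size the data transfer for a request. For RQF_SPECIAL_PAYLOAD
 * commands (e.g. WRITE SAME) this yields special_vec.bv_len rather than
 * blk_rq_bytes(rq). */
static int mydrv_map_data(struct request *rq)
{
	unsigned int len = blk_rq_payload_bytes(rq);

	if (!len)
		return 0;			/* nothing to transfer */
	return mydrv_setup_sgl(rq, len);	/* hypothetical helper */
}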

View file

@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_digest(struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@ -247,6 +247,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size);
void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;

View file

@ -45,10 +45,9 @@ struct can_proto {
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);
extern int can_rx_register(struct net_device *dev, canid_t can_id,
canid_t mask,
void (*func)(struct sk_buff *, void *),
void *data, char *ident);
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *),
void *data, char *ident, struct sock *sk);
extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
canid_t mask,
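
A hedged sketch of a caller passing the new struct sock * argument; everything except can_rx_register() itself is illustrative:

static void myproto_rcv(struct sk_buff *skb, void *data)
{
	/* deliver skb to the protocol state passed in 'data' */
}

/* 'sk' lets af_can tie the filter to its owning socket */
err = can_rx_register(dev, 0x123, CAN_SFF_MASK, myproto_rcv,
		      priv, "myproto", sk);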

View file

@ -14,6 +14,7 @@ struct coredump_params;
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else

View file

@ -8,9 +8,7 @@ enum cpuhp_state {
CPUHP_CREATE_THREADS,
CPUHP_PERF_PREPARE,
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_UNCORE_PREP,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
CPUHP_PERF_X86_RAPL_PREP,
CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
@ -74,6 +72,8 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
@ -84,7 +84,6 @@ enum cpuhp_state {
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_PERF_X86_UNCORE_STARTING,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
CPUHP_AP_PERF_X86_STARTING,
CPUHP_AP_PERF_X86_AMD_IBS_STARTING,

View file

@ -27,6 +27,7 @@ int iommu_dma_init(void);
/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
@ -34,7 +35,8 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev);
/* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs);
/*
* These implement the bulk of the relevant DMA mapping callbacks, but require
@ -65,7 +67,6 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
return -ENODEV;
}
static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
return -ENODEV;
}
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}

View file

@ -62,6 +62,13 @@
*/
#define DMA_ATTR_NO_WARN (1UL << 8)
/*
* DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
* accessible at an elevated privilege level (and ideally inaccessible or
* at least read-only at lesser-privileged levels).
*/
#define DMA_ATTR_PRIVILEGED (1UL << 9)
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot
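
A minimal sketch of requesting such a mapping through the existing dma_alloc_attrs() call; dev and size are assumed to be in scope:

dma_addr_t dma;
void *cpu;

/* ask for a buffer accessible only at elevated privilege */
cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, DMA_ATTR_PRIVILEGED);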

View file

@ -103,6 +103,7 @@ typedef struct {
#define EFI_PAGE_SHIFT 12
#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);

View file

@ -43,12 +43,19 @@ extern struct module __this_module;
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksum for some symbols */
#if defined(CONFIG_MODULE_REL_CRCS)
#define __CRC_SYMBOL(sym, sec) \
extern __visible void *__crc_##sym __attribute__((weak)); \
static const unsigned long __kcrctab_##sym \
__used \
__attribute__((section("___kcrctab" sec "+" #sym), used)) \
= (unsigned long) &__crc_##sym;
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .long " VMLINUX_SYMBOL_STR(__crc_##sym) " - . \n" \
" .previous \n");
#else
#define __CRC_SYMBOL(sym, sec) \
asm(" .section \"___kcrctab" sec "+" #sym "\", \"a\" \n" \
" .weak " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .long " VMLINUX_SYMBOL_STR(__crc_##sym) " \n" \
" .previous \n");
#endif
#else
#define __CRC_SYMBOL(sym, sec)
#endif

View file

@ -57,6 +57,8 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
#define BPF_TAG_SIZE 8
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@ -408,7 +410,7 @@ struct bpf_prog {
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
u8 tag[BPF_TAG_SIZE];
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,
@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
sizeof(__be64) + 1, SHA_MESSAGE_BYTES);

View file

@ -360,6 +360,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */

View file

@ -323,8 +323,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(str
extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
/* find (and take a reference) to a mark associated with group and vfsmount */
extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
/* copy the values from old into new */
extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
/* set the ignored_mask of a mark */
extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* set the mask of a mark (might pin the object into memory) */

View file

@ -146,15 +146,6 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
struct blk_scsi_cmd_filter {
unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
struct kobject kobj;
};
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;

View file

@ -38,9 +38,8 @@ struct vm_area_struct;
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
#define ___GFP_WRITE 0x800000u
#define ___GFP_KSWAPD_RECLAIM 0x1000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@ -172,11 +171,6 @@ struct vm_area_struct;
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
*
* __GFP_OTHER_NODE is for allocations that are on a remote node but that
* should not be accounted for as a remote allocation in vmstat. A
* typical user would be khugepaged collapsing a huge page on a remote
* node.
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
@ -184,10 +178,9 @@ struct vm_area_struct;
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
#define __GFP_BITS_SHIFT 26
#define __GFP_BITS_SHIFT 25
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache;
extern void __page_frag_drain(struct page *page, unsigned int order,
unsigned int count);
extern void *__alloc_page_frag(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr);
extern void __page_frag_cache_drain(struct page *page, unsigned int count);
extern void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask);
extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
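
A hedged, single-threaded sketch of the renamed page-fragment API (formerly __alloc_page_frag()/__free_page_frag()); the cache name is hypothetical and a zeroed static is enough to start from:

static struct page_frag_cache mydrv_frag_cache;

void *buf = page_frag_alloc(&mydrv_frag_cache, 256, GFP_ATOMIC);
if (buf) {
	/* ... fill the fragment, e.g. hand it to build_skb() ... */
	page_frag_free(buf);		/* was __free_page_frag() */
}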

View file

@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
int parent_irq);
int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
bool nested,
struct lock_class_key *lock_key);
int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
bool nested,
struct lock_class_key *lock_key);
#ifdef CONFIG_LOCKDEP
/*
* Lockdep requires that each irqchip instance be created with a
* unique key so as to avoid unnecessary warnings. These upfront
* boilerplate static inlines provide such a key for each
* unique instance.
*/
static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
static struct lock_class_key key;
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, false, &key);
}
/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
handler, type, true, NULL);
static struct lock_class_key key;
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, true, &key);
}
#else
static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, false, NULL);
}
#ifdef CONFIG_LOCKDEP
#define gpiochip_irqchip_add(...) \
( \
({ \
static struct lock_class_key _key; \
_gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
}) \
)
#else
#define gpiochip_irqchip_add(...) \
_gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
#endif
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
handler, type, true, NULL);
}
#endif /* CONFIG_LOCKDEP */
#endif /* CONFIG_GPIOLIB_IRQCHIP */
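
With the rework above, callers keep using the same helper and every call site transparently gets its own static lock class key; a hedged sketch with hypothetical mychip_* names:

ret = gpiochip_irqchip_add(&mychip->gc, &mychip_irq_chip, 0,
			   handle_simple_irq, IRQ_TYPE_NONE);
if (ret)
	dev_err(dev, "failed to add irqchip\n");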

View file

@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
u32 ring_data_startoffset;
u32 priv_write_index;
u32 priv_read_index;
u32 cached_read_index;
};
/*
@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
return write;
}
static inline u32 hv_get_cached_bytes_to_write(
const struct hv_ring_buffer_info *rbi)
{
u32 read_loc, write_loc, dsize, write;
dsize = rbi->ring_datasize;
read_loc = rbi->cached_read_index;
write_loc = rbi->ring_buffer->write_index;
write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
read_loc - write_loc;
return write;
}
/*
* VMBUS version is a 32 bit entity broken up into
* two 16 bit quantities: major_number and minor_number.
@ -1488,7 +1502,7 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
static inline void hv_signal_on_read(struct vmbus_channel *channel)
{
u32 cur_write_sz;
u32 cur_write_sz, cached_write_sz;
u32 pending_sz;
struct hv_ring_buffer_info *rbi = &channel->inbound;
@ -1512,12 +1526,24 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
cur_write_sz = hv_get_bytes_to_write(rbi);
if (cur_write_sz >= pending_sz)
if (cur_write_sz < pending_sz)
return;
cached_write_sz = hv_get_cached_bytes_to_write(rbi);
if (cached_write_sz < pending_sz)
vmbus_setevent(channel);
return;
}
static inline void
init_cached_read_index(struct vmbus_channel *channel)
{
struct hv_ring_buffer_info *rbi = &channel->inbound;
rbi->cached_read_index = rbi->ring_buffer->read_index;
}
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
@ -1569,6 +1595,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
* This call commits the read index and potentially signals the host.
* Here is the pattern for using the "in-place" consumption APIs:
*
* init_cached_read_index();
*
* while (get_next_pkt_raw()) {
* process the packet "in-place";
* put_pkt_raw();
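
Put together, a sketch of that in-place consumption loop using the helpers from this header:

struct vmpacket_descriptor *desc;

init_cached_read_index(channel);
while ((desc = get_next_pkt_raw(channel)) != NULL) {
	/* process the packet in place */
	put_pkt_raw(channel, desc);
}
commit_rd_index(channel);	/* commit read index, possibly signal host */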

View file

@ -665,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */

View file

@ -115,6 +115,16 @@ struct st_sensor_bdu {
u8 mask;
};
/**
* struct st_sensor_das - ST sensor device data alignment selection
* @addr: address of the register.
* @mask: mask to write the das flag for left alignment.
*/
struct st_sensor_das {
u8 addr;
u8 mask;
};
/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
* @addr: address of the register.
@ -185,6 +195,7 @@ struct st_sensor_transfer_function {
* @enable_axis: Enable one or more axis of the sensor.
* @fs: Full scale register and full scale list available.
* @bdu: Block data update register.
* @das: Data Alignment Selection register.
* @drdy_irq: Data ready register of the sensor.
* @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
* @bootime: samples to discard when the sensor passes from power-down to power-up.
@ -200,6 +211,7 @@ struct st_sensor_settings {
struct st_sensor_axis enable_axis;
struct st_sensor_fullscale fs;
struct st_sensor_bdu bdu;
struct st_sensor_das das;
struct st_sensor_data_ready_irq drdy_irq;
bool multi_read_bit;
unsigned int bootime;

View file

@ -29,6 +29,7 @@
#include <linux/dma_remapping.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@ -153,8 +154,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
@ -164,9 +165,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
@ -316,8 +317,8 @@ enum {
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_GLOB(g) ((u64)g)
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_EIOTLB_QDEP(qd) (((qd) & 0x1f) << 16)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@ -439,7 +440,7 @@ struct intel_iommu {
struct irq_domain *ir_domain;
struct irq_domain *ir_msi_domain;
#endif
struct device *iommu_dev; /* IOMMU-sysfs device */
struct iommu_device iommu; /* IOMMU core code handle */
int node;
u32 flags; /* Software defined flags */
};

View file

@ -31,6 +31,13 @@
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
* This is to make the IOMMU API setup privileged
* mappings accessible by the master only at a higher
* privileged execution level and inaccessible at
* less privileged levels.
*/
#define IOMMU_PRIV (1 << 5)
struct iommu_ops;
struct iommu_group;
@ -117,18 +124,25 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
/* These are the possible reserved region types */
#define IOMMU_RESV_DIRECT (1 << 0)
#define IOMMU_RESV_RESERVED (1 << 1)
#define IOMMU_RESV_MSI (1 << 2)
/**
* struct iommu_dm_region - descriptor for a direct mapped memory region
* struct iommu_resv_region - descriptor for a reserved memory region
* @list: Linked list pointers
* @start: System physical start address of the region
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
* @type: Type of the reserved region
*/
struct iommu_dm_region {
struct iommu_resv_region {
struct list_head list;
phys_addr_t start;
size_t length;
int prot;
int type;
};
#ifdef CONFIG_IOMMU_API
@ -150,9 +164,9 @@ struct iommu_dm_region {
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
* @get_dm_regions: Request list of direct mapping requirements for a device
* @put_dm_regions: Free list of direct mapping requirements for a device
* @apply_dm_region: Temporary helper call-back for iova reserved ranges
* @get_resv_regions: Request list of reserved regions for a device
* @put_resv_regions: Free list of reserved regions for a device
* @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
@ -184,11 +198,12 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
/* Request/Free a list of direct mapping requirements for a device */
void (*get_dm_regions)(struct device *dev, struct list_head *list);
void (*put_dm_regions)(struct device *dev, struct list_head *list);
void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
struct iommu_dm_region *region);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
void (*put_resv_regions)(struct device *dev, struct list_head *list);
void (*apply_resv_region)(struct device *dev,
struct iommu_domain *domain,
struct iommu_resv_region *region);
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@ -204,6 +219,42 @@ struct iommu_ops {
unsigned long pgsize_bitmap;
};
/**
* struct iommu_device - IOMMU core representation of one IOMMU hardware
* instance
* @list: Used by the iommu-core to keep a list of registered iommus
* @ops: iommu-ops for talking to this iommu
* @dev: struct device for sysfs handling
*/
struct iommu_device {
struct list_head list;
const struct iommu_ops *ops;
struct fwnode_handle *fwnode;
struct device dev;
};
int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
const struct attribute_group **groups,
const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
static inline void iommu_device_set_ops(struct iommu_device *iommu,
const struct iommu_ops *ops)
{
iommu->ops = ops;
}
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
struct fwnode_handle *fwnode)
{
iommu->fwnode = fwnode;
}
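
A hedged sketch of how an IOMMU driver might adopt the new core handle; the mydrv_* names are hypothetical:

struct mydrv_iommu {
	/* ... driver state ... */
	struct iommu_device iommu;	/* replaces the old iommu_dev pointer */
};

static int mydrv_register(struct mydrv_iommu *m, struct device *dev)
{
	int ret;

	ret = iommu_device_sysfs_add(&m->iommu, dev, NULL, "mydrv.%s",
				     dev_name(dev));
	if (ret)
		return ret;
	iommu_device_set_ops(&m->iommu, &mydrv_iommu_ops);
	iommu_device_set_fwnode(&m->iommu, dev->fwnode);
	return iommu_device_register(&m->iommu);
}
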
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@ -233,9 +284,13 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head);
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
@ -267,12 +322,6 @@ extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
void *data);
struct device *iommu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...) __printf(4, 5);
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
@ -352,15 +401,14 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops);
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
static inline bool iommu_present(struct bus_type *bus)
{
@ -443,16 +491,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
static inline void iommu_get_dm_regions(struct device *dev,
static inline void iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
}
static inline void iommu_put_dm_regions(struct device *dev,
static inline void iommu_put_resv_regions(struct device *dev,
struct list_head *list)
{
}
static inline int iommu_get_group_resv_regions(struct iommu_group *group,
struct list_head *head)
{
return -ENODEV;
}
static inline int iommu_request_dm_for_dev(struct device *dev)
{
return -ENODEV;
@ -546,15 +600,34 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain,
return -EINVAL;
}
static inline struct device *iommu_device_create(struct device *parent,
void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...)
static inline int iommu_device_register(struct iommu_device *iommu)
{
return ERR_PTR(-ENODEV);
return -ENODEV;
}
static inline void iommu_device_destroy(struct device *dev)
static inline void iommu_device_set_ops(struct iommu_device *iommu,
const struct iommu_ops *ops)
{
}
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
struct fwnode_handle *fwnode)
{
}
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
struct device *parent,
const struct attribute_group **groups,
const char *fmt, ...)
{
return -ENODEV;
}
static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}
@ -584,13 +657,8 @@ static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
return -ENODEV;
}
static inline void iommu_register_instance(struct fwnode_handle *fwnode,
const struct iommu_ops *ops)
{
}
static inline
const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
return NULL;
}
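
On the reserved-region side, a hedged sketch of a consumer walking a device's regions with the renamed API:

struct iommu_resv_region *region;
LIST_HEAD(resv_list);

iommu_get_resv_regions(dev, &resv_list);
list_for_each_entry(region, &resv_list, list)
	dev_info(dev, "resv region %pa+%zu, prot 0x%x, type %d\n",
		 &region->start, region->length, region->prot, region->type);
iommu_put_resv_regions(dev, &resv_list);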

View file

@ -184,6 +184,7 @@ struct irq_data {
*
* IRQD_TRIGGER_MASK - Mask for the trigger type bits
* IRQD_SETAFFINITY_PENDING - Affinity setting is pending
* IRQD_ACTIVATED - Interrupt has already been activated
* IRQD_NO_BALANCING - Balancing disabled for this IRQ
* IRQD_PER_CPU - Interrupt is per cpu
* IRQD_AFFINITY_SET - Interrupt affinity was set
@ -202,6 +203,7 @@ struct irq_data {
enum {
IRQD_TRIGGER_MASK = 0xf,
IRQD_SETAFFINITY_PENDING = (1 << 8),
IRQD_ACTIVATED = (1 << 9),
IRQD_NO_BALANCING = (1 << 10),
IRQD_PER_CPU = (1 << 11),
IRQD_AFFINITY_SET = (1 << 12),
@ -312,6 +314,21 @@ static inline bool irqd_affinity_is_managed(struct irq_data *d)
return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
}
static inline bool irqd_is_activated(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_ACTIVATED;
}
static inline void irqd_set_activated(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_ACTIVATED;
}
static inline void irqd_clr_activated(struct irq_data *d)
{
__irqd_to_state(d) &= ~IRQD_ACTIVATED;
}
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)

View file

@ -183,6 +183,12 @@ enum {
/* Irq domain is an IPI domain with single virq */
IRQ_DOMAIN_FLAG_IPI_SINGLE = (1 << 3),
/* Irq domain implements MSIs */
IRQ_DOMAIN_FLAG_MSI = (1 << 4),
/* Irq domain implements MSI remapping */
IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
@ -216,6 +222,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
void *host_data);
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token);
extern bool irq_domain_check_msi_remap(void);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
irq_hw_number_t hwirq, int node,
@ -446,6 +453,19 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_IPI_SINGLE;
}
static inline bool irq_domain_is_msi(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_MSI;
}
static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
{
return domain->flags & IRQ_DOMAIN_FLAG_MSI_REMAP;
}
extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
static inline void irq_domain_activate_irq(struct irq_data *data) { }
static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
@ -477,6 +497,22 @@ static inline bool irq_domain_is_ipi_single(struct irq_domain *domain)
{
return false;
}
static inline bool irq_domain_is_msi(struct irq_domain *domain)
{
return false;
}
static inline bool irq_domain_is_msi_remap(struct irq_domain *domain)
{
return false;
}
static inline bool
irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
{
return false;
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
#else /* CONFIG_IRQ_DOMAIN */
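
A hedged sketch of the kind of check this enables, in the spirit of a userspace-DMA backend such as VFIO: interrupts are considered safely remapped if either the IOMMU remaps them or an MSI controller in the system does:

bool msi_safe;

msi_safe = iommu_capable(bus, IOMMU_CAP_INTR_REMAP) ||
	   irq_domain_check_msi_remap();
if (!msi_safe)
	return -EPERM;	/* or require an explicit opt-in */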

View file

@ -14,6 +14,7 @@ struct static_key_deferred {
#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
static inline void static_key_deferred_flush(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE();
}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)

View file

@ -514,8 +514,8 @@ extern enum system_states {
#define TAINT_FLAGS_COUNT 16
struct taint_flag {
char true; /* character printed when tainted */
char false; /* character printed when not tainted */
char c_true; /* character printed when tainted */
char c_false; /* character printed when not tainted */
bool module; /* also show as a per-module taint flag */
};

View file

@ -203,6 +203,17 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
* ... and so on.
*/
#define order_base_2(n) ilog2(roundup_pow_of_two(n))
static inline __attribute_const__
int __order_base_2(unsigned long n)
{
return n > 1 ? ilog2(n - 1) + 1 : 0;
}
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0 || (n) == 1) ? 0 : \
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
#endif /* _LINUX_LOG2_H */
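
The constant-folding and out-of-line paths agree; a few worked values as a sanity check:

/* order_base_2(1) == 0, order_base_2(16) == 4, order_base_2(17) == 5 */
int order = order_base_2(nr_entries);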

View file

@ -13,34 +13,10 @@
#ifndef MDEV_H
#define MDEV_H
/* Parent device */
struct parent_device {
struct device *dev;
const struct parent_ops *ops;
/* internal */
struct kref ref;
struct mutex lock;
struct list_head next;
struct kset *mdev_types_kset;
struct list_head type_list;
};
/* Mediated device */
struct mdev_device {
struct device dev;
struct parent_device *parent;
uuid_le uuid;
void *driver_data;
/* internal */
struct kref ref;
struct list_head next;
struct kobject *type_kobj;
};
struct mdev_device;
/**
* struct parent_ops - Structure to be registered for each parent device to
* struct mdev_parent_ops - Structure to be registered for each parent device to
* register the device to mdev module.
*
* @owner: The module owner.
@ -86,10 +62,9 @@ struct mdev_device {
* @mdev: mediated device structure
* @vma: vma structure
* Parent device that support mediated device should be registered with mdev
* module with parent_ops structure.
* module with mdev_parent_ops structure.
**/
struct parent_ops {
struct mdev_parent_ops {
struct module *owner;
const struct attribute_group **dev_attr_groups;
const struct attribute_group **mdev_attr_groups;
@ -103,7 +78,7 @@ struct parent_ops {
size_t count, loff_t *ppos);
ssize_t (*write)(struct mdev_device *mdev, const char __user *buf,
size_t count, loff_t *ppos);
ssize_t (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
long (*ioctl)(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg);
int (*mmap)(struct mdev_device *mdev, struct vm_area_struct *vma);
};
@ -142,27 +117,22 @@ struct mdev_driver {
};
#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
#define to_mdev_device(dev) container_of(dev, struct mdev_device, dev)
static inline void *mdev_get_drvdata(struct mdev_device *mdev)
{
return mdev->driver_data;
}
static inline void mdev_set_drvdata(struct mdev_device *mdev, void *data)
{
mdev->driver_data = data;
}
extern void *mdev_get_drvdata(struct mdev_device *mdev);
extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
extern uuid_le mdev_uuid(struct mdev_device *mdev);
extern struct bus_type mdev_bus_type;
#define dev_is_mdev(d) ((d)->bus == &mdev_bus_type)
extern int mdev_register_device(struct device *dev,
const struct parent_ops *ops);
const struct mdev_parent_ops *ops);
extern void mdev_unregister_device(struct device *dev);
extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
extern void mdev_unregister_driver(struct mdev_driver *drv);
extern struct device *mdev_parent_dev(struct mdev_device *mdev);
extern struct device *mdev_dev(struct mdev_device *mdev);
extern struct mdev_device *mdev_from_dev(struct device *dev);
#endif /* MDEV_H */
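
A hedged sketch of a vendor driver registering against the renamed structure (the mydrv_* callbacks and type groups are hypothetical; note that .ioctl now returns long):

static const struct mdev_parent_ops mydrv_mdev_ops = {
	.owner			= THIS_MODULE,
	.supported_type_groups	= mydrv_type_groups,
	.create			= mydrv_create,
	.remove			= mydrv_remove,
	.ioctl			= mydrv_ioctl,
};

ret = mdev_register_device(dev, &mydrv_mdev_ops);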

View file

@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
unsigned long lru_size[NR_LRU_LISTS];
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int nr_pages);
int zid, int nr_pages);
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask);
@ -441,9 +441,23 @@ static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
struct mem_cgroup_per_node *mz;
unsigned long nr_pages = 0;
int zid;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
return mz->lru_size[lru];
for (zid = 0; zid < MAX_NR_ZONES; zid++)
nr_pages += mz->lru_zone_size[zid][lru];
return nr_pages;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
{
struct mem_cgroup_per_node *mz;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
return mz->lru_zone_size[zone_idx][lru];
}
void mem_cgroup_handle_over_high(void);
@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
{
return 0;
}
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,

View file

@ -85,7 +85,8 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long, unsigned long);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
unsigned long *valid_start, unsigned long *valid_end);
extern void __offline_isolated_pages(unsigned long, unsigned long);
typedef void (*online_page_callback_t)(struct page *page);
@ -284,7 +285,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
enum zone_type target);
extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
enum zone_type target, int *zone_shift);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
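
A hedged sketch of the new calling convention, with the target zone derived through the out-parameter:

int zone_shift = 0;

if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
	return -EINVAL;
zone = page_zone(pfn_to_page(pfn)) + zone_shift;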

View file

@ -35,6 +35,8 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
#define PHY_ID_KSZ8795 0x00221550
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002

View file

@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);

View file

@ -1071,11 +1071,6 @@ enum {
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
enum {
MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)

View file

@ -123,7 +123,6 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MPCNT = 0x9051,
};
enum mlx5_dcbx_oper_mode {

View file

@ -1757,80 +1757,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];
u8 life_time_counter_low[0x20];
u8 rx_errors[0x20];
u8 tx_errors[0x20];
u8 l0_to_recovery_eieos[0x20];
u8 l0_to_recovery_ts[0x20];
u8 l0_to_recovery_framing[0x20];
u8 l0_to_recovery_retrain[0x20];
u8 crc_error_dllp[0x20];
u8 crc_error_tlp[0x20];
u8 reserved_at_140[0x680];
};
struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];
u8 life_time_counter_low[0x20];
u8 time_to_boot_image_start[0x20];
u8 time_to_link_image[0x20];
u8 calibration_time[0x20];
u8 time_to_first_perst[0x20];
u8 time_to_detect_state[0x20];
u8 time_to_l0[0x20];
u8 time_to_crs_en[0x20];
u8 time_to_plastic_image_start[0x20];
u8 time_to_iron_image_start[0x20];
u8 perst_handler[0x20];
u8 times_in_l1[0x20];
u8 times_in_l23[0x20];
u8 dl_down[0x20];
u8 config_cycle1usec[0x20];
u8 config_cycle2to7usec[0x20];
u8 config_cycle_8to15usec[0x20];
u8 config_cycle_16_to_63usec[0x20];
u8 config_cycle_64usec[0x20];
u8 correctable_err_msg_sent[0x20];
u8 non_fatal_err_msg_sent[0x20];
u8 fatal_err_msg_sent[0x20];
u8 reserved_at_2e0[0x4e0];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@ -2995,12 +2921,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_event_auto_bits {
struct mlx5_ifc_comp_event_bits comp_event;
struct mlx5_ifc_dct_events_bits dct_events;
@ -7320,18 +7240,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
u8 reserved_at_10[0xa];
u8 grp[0x6];
u8 clr[0x1];
u8 reserved_at_21[0x1f];
union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
};
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
@ -7937,7 +7845,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;

View file

@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
spinlock_t **ptlp);
int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,

View file

@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

View file

@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* @zonelist - The zonelist to search for a suitable zone
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
* @zone - The first suitable zone found is returned via this parameter
* @return - Zoneref pointer for the first suitable zone found (see below)
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
*
* When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
* never NULL). This may happen either genuinely, or due to concurrent nodemask
* update due to cpuset modification.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,

View file

@ -346,7 +346,7 @@ struct module {
/* Exported symbols */
const struct kernel_symbol *syms;
const unsigned long *crcs;
const s32 *crcs;
unsigned int num_syms;
/* Kernel parameters. */
@ -359,18 +359,18 @@ struct module {
/* GPL-only exported symbols. */
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const unsigned long *gpl_crcs;
const s32 *gpl_crcs;
#ifdef CONFIG_UNUSED_SYMBOLS
/* unused exported symbols. */
const struct kernel_symbol *unused_syms;
const unsigned long *unused_crcs;
const s32 *unused_crcs;
unsigned int num_unused_syms;
/* GPL-only, unused exported symbols. */
unsigned int num_unused_gpl_syms;
const struct kernel_symbol *unused_gpl_syms;
const unsigned long *unused_gpl_crcs;
const s32 *unused_gpl_crcs;
#endif
#ifdef CONFIG_MODULE_SIG
@ -382,7 +382,7 @@ struct module {
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
const unsigned long *gpl_future_crcs;
const s32 *gpl_future_crcs;
unsigned int num_gpl_future_syms;
/* Exception table */
@ -523,7 +523,7 @@ struct module *find_module(const char *name);
struct symsearch {
const struct kernel_symbol *start, *stop;
const unsigned long *crcs;
const s32 *crcs;
enum {
NOT_GPL_ONLY,
GPL_ONLY,
@ -539,7 +539,7 @@ struct symsearch {
*/
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
const s32 **crc,
bool gplok,
bool warn);

View file

@ -866,11 +866,15 @@ struct netdev_xdp {
* of useless work if you return NETDEV_TX_BUSY.
* Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
* Adjusts the requested feature flags according to device-specific
* constraints, and returns the resulting flags. Must not modify
* the device state.
* netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
* struct net_device *dev,
* netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
* be otherwise expressed by feature flags. The check is called with
* the set of features that the stack has calculated and it returns
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
@ -1028,6 +1032,12 @@ struct netdev_xdp {
* Called to release previously enslaved netdev.
*
* Feature/offload setting functions.
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
* Adjusts the requested feature flags according to device-specific
* constraints, and returns the resulting flags. Must not modify
* the device state.
*
* int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
* Called to update device configuration to new features. Passed
* feature set might be less than what was returned by ndo_fix_features().
@ -1100,15 +1110,6 @@ struct netdev_xdp {
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
* netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
* struct net_device *dev
* netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
* be otherwise expressed by feature flags. The check is called with
* the set of features that the stack has calculated and it returns
* those the driver believes to be appropriate.
* int (*ndo_set_tx_maxrate)(struct net_device *dev,
* int queue_index, u32 maxrate);
* Called when a user wants to set a max-rate limitation of specific
@ -2477,14 +2478,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
}
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
if (!pskb_may_pull(skb, hlen))
return NULL;
NAPI_GRO_CB(skb)->frag0 = NULL;
NAPI_GRO_CB(skb)->frag0_len = 0;
skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}

View file

@ -282,7 +282,7 @@ enum nfsstat4 {
static inline bool seqid_mutating_err(u32 err)
{
/* rfc 3530 section 8.1.5: */
/* See RFC 7530, section 9.1.7 */
switch (err) {
case NFS4ERR_STALE_CLIENTID:
case NFS4ERR_STALE_STATEID:
@ -291,6 +291,7 @@ static inline bool seqid_mutating_err(u32 err)
case NFS4ERR_BADXDR:
case NFS4ERR_RESOURCE:
case NFS4ERR_NOFILEHANDLE:
case NFS4ERR_MOVED:
return false;
};
return true;

View file

@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;

View file

@ -31,17 +31,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
static inline void of_iommu_set_ops(struct device_node *np,
const struct iommu_ops *ops)
{
iommu_register_instance(&np->fwnode, ops);
}
static inline const struct iommu_ops *of_iommu_get_ops(struct device_node *np)
{
return iommu_get_instance(&np->fwnode);
}
extern struct of_device_id __iommu_of_table;
typedef int (*of_iommu_init_fn)(struct device_node *);

View file

@ -204,7 +204,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
int ret;
bool ret;
rcu_read_lock_sched();
@ -238,7 +238,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
unsigned long __percpu *percpu_count;
int ret = false;
bool ret = false;
rcu_read_lock_sched();

View file

@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,

View file

@ -25,7 +25,6 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
#include <linux/phy_led_triggers.h>
#include <linux/atomic.h>

View file

@ -18,11 +18,11 @@ struct phy_device;
#ifdef CONFIG_LED_TRIGGER_PHY
#include <linux/leds.h>
#include <linux/phy.h>
#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
#define PHY_MII_BUS_ID_SIZE (20 - 3)
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)

View file

@ -306,7 +306,9 @@ void radix_tree_iter_replace(struct radix_tree_root *,
void radix_tree_replace_slot(struct radix_tree_root *root,
void **slot, void *item);
void __radix_tree_delete_node(struct radix_tree_root *root,
struct radix_tree_node *node);
struct radix_tree_node *node,
radix_tree_update_node_t update_node,
void *private);
void *radix_tree_delete_item(struct radix_tree_root *, unsigned long, void *);
void *radix_tree_delete(struct radix_tree_root *, unsigned long);
void radix_tree_clear_tags(struct radix_tree_root *root,

View file

@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
#error "Unknown RCU implementation specified to kernel configuration"
#endif
#define RCU_SCHEDULER_INACTIVE 0
#define RCU_SCHEDULER_INIT 1
#define RCU_SCHEDULER_RUNNING 2
/*
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
* initialization and destruction of rcu_head on the stack. rcu_head structures

View file

@ -408,7 +408,8 @@ enum rproc_crash_type {
* @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
* @table_ptr: our copy of the resource table
* @table_ptr: pointer to the resource table in effect
* @cached_table: copy of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
@ -440,6 +441,7 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
struct resource_table *cached_table;
bool has_iommu;
bool auto_boot;
};

View file

@ -854,6 +854,16 @@ struct signal_struct {
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
SIGNAL_STOP_CONTINUED)
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{

View file

@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
static inline void skb_free_frag(void *addr)
{
__free_page_frag(addr);
page_frag_free(addr);
}
void *napi_alloc_frag(unsigned int fragsz);

View file

@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX 30
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif

View file

@ -216,5 +216,6 @@ void rpc_clnt_xprt_switch_put(struct rpc_clnt *);
void rpc_clnt_xprt_switch_add_xprt(struct rpc_clnt *, struct rpc_xprt *);
bool rpc_clnt_xprt_switch_has_addr(struct rpc_clnt *clnt,
const struct sockaddr *sap);
void rpc_cleanup_clids(void);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */

View file

@ -66,6 +66,7 @@ struct svc_xprt {
#define XPT_LISTENER 10 /* listening endpoint */
#define XPT_CACHE_AUTH 11 /* cache auth info */
#define XPT_LOCAL 12 /* connection from loopback interface */
#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */

View file

@ -194,8 +194,6 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
extern suspend_state_t mem_sleep_default;
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.

View file

@ -150,8 +150,9 @@ enum {
SWP_FILE = (1 << 7), /* set after swap_activate success */
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
/* add others here before... */
SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */
SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL

View file

@ -9,7 +9,13 @@ struct device;
struct page;
struct scatterlist;
extern int swiotlb_force;
enum swiotlb_force {
SWIOTLB_NORMAL, /* Default - depending on HW DMA mask etc. */
SWIOTLB_FORCE, /* swiotlb=force */
SWIOTLB_NO_FORCE, /* swiotlb=noforce */
};
extern enum swiotlb_force swiotlb_force;
/*
* Maximum allowable number of contiguous slabs to map,
@ -108,11 +114,14 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
unsigned int swiotlb_max_segment(void);
#else
static inline void swiotlb_free(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
#endif /* __LINUX_SWIOTLB_H */
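
A hedged sketch of how a DMA-mapping path consumes the tristate that replaces the old int flag (use_bounce_buffer() is hypothetical):

if (swiotlb_force == SWIOTLB_FORCE || !dma_capable(dev, dev_addr, size))
	return use_bounce_buffer(dev, paddr, size);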

View file

@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
union {
u8 val[TCP_FASTOPEN_COOKIE_MAX];
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr addr;
#endif
};
s8 len;
u8 val[TCP_FASTOPEN_COOKIE_MAX];
bool exp; /* In RFC6994 experimental option format */
};

View file

@ -8,23 +8,7 @@
#ifndef _LINUX_TIMERFD_H
#define _LINUX_TIMERFD_H
/* For O_CLOEXEC and O_NONBLOCK */
#include <linux/fcntl.h>
/* For _IO helpers */
#include <linux/ioctl.h>
/*
* CAREFUL: Check include/asm-generic/fcntl.h when defining
* new flags, since they might collide with O_* ones. We want
* to re-use O_* flags that couldn't possibly have a meaning
* from eventfd, in order to leave a free define-space for
* shared O_* flags.
*/
#define TFD_TIMER_ABSTIME (1 << 0)
#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
#define TFD_CLOEXEC O_CLOEXEC
#define TFD_NONBLOCK O_NONBLOCK
#include <uapi/linux/timerfd.h>
#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
/* Flags for timerfd_create. */
@ -32,6 +16,4 @@
/* Flags for timerfd_settime. */
#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
#endif /* _LINUX_TIMERFD_H */

View file

@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
bool little_endian)
bool little_endian,
bool has_data_valid)
{
memset(hdr, 0, sizeof(*hdr)); /* no info leak */
@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
skb_checksum_start_offset(skb));
hdr->csum_offset = __cpu_to_virtio16(little_endian,
skb->csum_offset);
} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
} else if (has_data_valid &&
skb->ip_summed == CHECKSUM_UNNECESSARY) {
hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
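
A hedged sketch of a caller adapting to the new argument: paths delivering the header to a userspace reader (tap-like) may pass true, while a header destined for a device should pass false, since VIRTIO_NET_HDR_F_DATA_VALID is only meaningful device-to-driver:

struct virtio_net_hdr hdr;

if (virtio_net_hdr_from_skb(skb, &hdr, little_endian, true))
	goto drop;	/* hypothetical error path */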