Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge conflict of mlx5 resolved using instructions in merge
commit 9566e650bf.

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 446bf64b61
549 changed files with 4710 additions and 2902 deletions

include/linux/blk_types.h
@@ -311,7 +311,6 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,		/* Don't wait if request will block */
-	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -346,7 +345,6 @@ enum req_flag_bits {
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
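
The BLK_QC_T_EAGAIN cookie goes away together with REQ_NOWAIT_INLINE, so a polling cookie is now invalid only when it equals BLK_QC_T_NONE. A minimal standalone sketch of the resulting check (the constants mirror the hunk above; the surrounding program is illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE	-1U

/* After the merge: only BLK_QC_T_NONE marks an invalid cookie. */
static bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

int main(void)
{
	printf("%d\n", blk_qc_t_valid(BLK_QC_T_NONE));	/* 0: no cookie to poll */
	printf("%d\n", blk_qc_t_valid(42));		/* 1: pollable cookie */
	return 0;
}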

include/linux/ccp.h
@@ -170,6 +170,8 @@ struct ccp_aes_engine {
 	enum ccp_aes_mode mode;
 	enum ccp_aes_action action;
 
+	u32 authsize;
+
 	struct scatterlist *key;
 	u32 key_len;		/* In bytes */
 
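
The new authsize field lets a caller pass the GCM authentication-tag length down to the CCP rather than having the engine assume it. A hedged sketch of filling the structure for a GCM operation (the helper function is invented; the enum values are the real ones from include/linux/ccp.h):

/* Sketch: populating the new authsize field when building a GCM
 * request for the CCP. example_fill_aes_engine() is illustrative. */
static void example_fill_aes_engine(struct ccp_aes_engine *aes,
				    struct scatterlist *key, u32 key_len,
				    unsigned int authsize)
{
	aes->mode = CCP_AES_MODE_GCM;
	aes->action = CCP_AES_ACTION_ENCRYPT;
	aes->authsize = authsize;	/* tag length validated by the caller */
	aes->key = key;
	aes->key_len = key_len;
}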

include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
 
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs);
-#else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
+#else
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
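
dma_pgprot() replaces the per-arch arch_dma_mmap_pgprot() hook with one common helper, stubbed to a no-op on !MMU builds. A hedged sketch of how an mmap path might apply it (the function is invented for illustration; remap_pfn_range() and the two helpers from the hunk are real):

/* Sketch: applying dma_pgprot() when mapping a coherent buffer to
 * userspace. Illustrative only; not the actual dma_common_mmap(). */
static int example_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t dma_addr, size_t size,
			    unsigned long attrs)
{
	unsigned long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);

	/* Let the core derive cacheability from the mapping attributes. */
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}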

include/linux/gfp.h
@@ -510,22 +510,18 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 }
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
-			int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
-	alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+			int node);
 #else
 #define alloc_pages(gfp_mask, order) \
 		alloc_pages_node(numa_node_id(), gfp_mask, order)
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
-	alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_pages_vma(gfp_mask, order, vma, addr, node)\
 	alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 #define alloc_page_vma(gfp_mask, vma, addr)			\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)		\
-	alloc_pages_vma(gfp_mask, 0, vma, addr, node, false)
+	alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
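
The trailing hugepage bool is dropped from alloc_pages_vma() and the alloc_hugepage_vma() wrapper disappears, so callers pass only gfp mask, order, VMA, address, and node. A hedged sketch of a caller on the new signature (the fault-path function is invented for illustration):

/* Sketch: allocating a page for a VMA with the post-merge signature.
 * example_anon_fault() is illustrative, not a real kernel function. */
static struct page *example_anon_fault(struct vm_area_struct *vma,
				       unsigned long addr)
{
	/* The former trailing "bool hugepage" argument no longer exists;
	 * THP gfp handling moved back into the THP fault path itself. */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id());
}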

include/linux/key.h
@@ -94,11 +94,11 @@ struct keyring_index_key {
 	union {
 		struct {
 #ifdef __LITTLE_ENDIAN /* Put desc_len at the LSB of x */
-			u8	desc_len;
-			char	desc[sizeof(long) - 1];	/* First few chars of description */
+			u16	desc_len;
+			char	desc[sizeof(long) - 2];	/* First few chars of description */
 #else
-			char	desc[sizeof(long) - 1];	/* First few chars of description */
-			u8	desc_len;
+			char	desc[sizeof(long) - 2];	/* First few chars of description */
+			u16	desc_len;
 #endif
 		};
 		unsigned long x;
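
The union packs desc_len plus the first few description characters into a single unsigned long (x) so index keys can be compared word-at-a-time; widening desc_len from u8 to u16 costs one packed character. A standalone sketch of the trick (userspace, little-endian layout assumed; the struct mirrors the hunk but the program is illustrative):

#include <stdio.h>
#include <string.h>

/* Mirrors the post-merge little-endian layout. */
struct index_key {
	union {
		struct {
			unsigned short desc_len;
			char desc[sizeof(long) - 2]; /* first chars of description */
		};
		unsigned long x;
	};
};

int main(void)
{
	struct index_key a = { .desc_len = 4 }, b = { .desc_len = 4 };
	size_t n = sizeof(a.desc) < 4 ? sizeof(a.desc) : 4;

	memcpy(a.desc, "user", n);
	memcpy(b.desc, "user", n);

	/* One word comparison covers both length and leading characters. */
	printf("match: %d\n", a.x == b.x);
	return 0;
}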

include/linux/kvm_host.h
@@ -861,8 +861,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
 
-bool kvm_arch_has_vcpu_debugfs(void);
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu);
+#endif
 
 int kvm_arch_hardware_enable(void);
 void kvm_arch_hardware_disable(void);
@@ -872,6 +873,7 @@ int kvm_arch_check_processor_compat(void);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
 /*
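
The runtime kvm_arch_has_vcpu_debugfs() probe becomes a compile-time opt-in: an architecture that wants per-vCPU debugfs entries defines __KVM_HAVE_ARCH_VCPU_DEBUGFS and implements the hook, which now returns void. A hedged sketch of an opting-in architecture (the file name and fops are invented; vcpu->debugfs_dentry is the real parent directory):

/* In the arch's asm/kvm_host.h: advertise the hook at compile time. */
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

/* In arch code: failures of debugfs creation are simply ignored,
 * which is why the return type dropped to void. Illustrative only. */
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	debugfs_create_file("example-stat", 0444, vcpu->debugfs_dentry,
			    vcpu, &example_stat_fops);
}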

include/linux/memcontrol.h
@@ -668,6 +668,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 
 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 			int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
 				    enum node_stat_item idx, int val)
@@ -1072,6 +1073,14 @@ static inline void mod_lruvec_page_state(struct page *page,
 	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+					   int val)
+{
+	struct page *page = virt_to_head_page(p);
+
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 					    gfp_t gfp_mask,
@@ -1159,6 +1168,16 @@ static inline void __dec_lruvec_page_state(struct page *page,
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, -1);
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void inc_memcg_state(struct mem_cgroup *memcg,
 				   int idx)
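
The new *_lruvec_slab_state helpers take a slab object pointer rather than a struct page, resolving the object to its head page internally. A hedged sketch of a caller accounting a workingset shadow node (the surrounding function is invented; WORKINGSET_NODES is a real node_stat_item):

/* Sketch: bump a per-lruvec stat for a slab-allocated object.
 * example_track_node() is illustrative, not a real kernel function. */
static void example_track_node(void *node)
{
	/* The helper maps the object to its head page and charges the
	 * stat against that page's node (and, with memcg, its lruvec). */
	__inc_lruvec_slab_state(node, WORKINGSET_NODES);
}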

include/linux/mempolicy.h
@@ -139,6 +139,8 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
 struct mempolicy *get_task_policy(struct task_struct *p);
 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
 		unsigned long addr);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+		unsigned long addr);
 bool vma_policy_mof(struct vm_area_struct *vma);
 
 extern void numa_default_policy(void);

include/linux/mlx5/device.h
@@ -446,11 +446,11 @@ enum {
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
 };
 
 enum {
-	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x20,
+	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
 };
 
 enum {

include/linux/mlx5/mlx5_ifc.h
@@ -10078,9 +10078,8 @@ struct mlx5_ifc_tls_static_params_bits {
 };
 
 struct mlx5_ifc_tls_progress_params_bits {
-	u8         valid[0x1];
-	u8         reserved_at_1[0x7];
-	u8         pd[0x18];
+	u8         reserved_at_0[0x8];
+	u8         tisn[0x18];
 
 	u8         next_record_tcp_sn[0x20];
 

include/linux/mm_types.h
@@ -159,7 +159,16 @@ struct page {
 			/** @pgmap: Points to the hosting device page map. */
 			struct dev_pagemap *pgmap;
 			void *zone_device_data;
-			unsigned long _zd_pad_1;	/* uses mapping */
+			/*
+			 * ZONE_DEVICE private pages are counted as being
+			 * mapped so the next 3 words hold the mapping, index,
+			 * and private fields from the source anonymous or
+			 * page cache page while the page is migrated to device
+			 * private memory.
+			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
+			 * use the mapping, index, and private fields when
+			 * pmem backed DAX files are mapped.
+			 */
 		};
 
 		/** @rcu_head: You can use this to free a page by RCU. */

include/linux/pci.h
@@ -1567,8 +1567,10 @@ extern bool pcie_ports_native;
 
 #ifdef CONFIG_PCIEASPM
 bool pcie_aspm_support_enabled(void);
+bool pcie_aspm_enabled(struct pci_dev *pdev);
 #else
 static inline bool pcie_aspm_support_enabled(void) { return false; }
+static inline bool pcie_aspm_enabled(struct pci_dev *pdev) { return false; }
 #endif
 
 #ifdef CONFIG_PCIEAER
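
pcie_aspm_enabled() lets a driver ask whether ASPM is active on its own device, with a stub returning false when CONFIG_PCIEASPM is off. A hedged sketch of a probe-time check (the probe function is invented for illustration):

/* Sketch: a driver consulting ASPM state at probe time.
 * example_probe() is illustrative, not a real driver. */
static int example_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	bool aspm_active;

	/* Safe with or without CONFIG_PCIEASPM thanks to the stub. */
	aspm_active = pcie_aspm_enabled(pdev);
	dev_info(&pdev->dev, "ASPM %s\n",
		 aspm_active ? "enabled" : "disabled");

	return 0;
}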

include/linux/skbuff.h
@@ -1362,6 +1362,14 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 	to->l4_hash = from->l4_hash;
 };
 
+static inline void skb_copy_decrypted(struct sk_buff *to,
+				      const struct sk_buff *from)
+{
+#ifdef CONFIG_TLS_DEVICE
+	to->decrypted = from->decrypted;
+#endif
+}
+
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
 {
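
skb_copy_decrypted() propagates the TLS-offload "decrypted" bit when payload moves between skbs, and compiles to nothing without CONFIG_TLS_DEVICE. A hedged sketch of the intended call pattern (the coalescing function is invented; the real users sit in the TCP/TLS paths):

/* Sketch: keep the hardware-decrypted marking when copying payload
 * into a new skb. example_coalesce() is illustrative only. */
static struct sk_buff *example_coalesce(struct sk_buff *from)
{
	struct sk_buff *to = alloc_skb(from->len, GFP_ATOMIC);

	if (!to)
		return NULL;

	skb_put_data(to, from->data, from->len);
	/* Without this, mixing decrypted and encrypted data could
	 * silently corrupt a device-offloaded TLS stream. */
	skb_copy_decrypted(to, from);
	return to;
}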

include/linux/socket.h
@@ -292,6 +292,9 @@ struct ucred {
 #define MSG_BATCH	0x40000 /* sendmmsg(): more messages coming */
 #define MSG_EOF         MSG_FIN
 #define MSG_NO_SHARED_FRAGS 0x80000 /* sendpage() internal : page frags are not shared */
+#define MSG_SENDPAGE_DECRYPTED	0x100000 /* sendpage() internal : page may carry
+					  * plain text and require encryption
+					  */
 
 #define MSG_ZEROCOPY	0x4000000	/* Use user data in kernel path */
 #define MSG_FASTOPEN	0x20000000	/* Send data in TCP SYN */
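
MSG_SENDPAGE_DECRYPTED is a kernel-internal sendpage flag: a TLS sender uses it to tell the transport that the page holds plaintext that still needs encryption, which pairs with the skb_copy_decrypted() helper above. A hedged sketch of an in-kernel caller setting it (the function is invented; kernel_sendpage() is the real in-kernel API):

/* Sketch: an in-kernel TLS writer flagging plaintext pages so the
 * transport marks the resulting skbs accordingly. Illustrative only. */
static int example_send_plaintext(struct socket *sock, struct page *page,
				  int offset, size_t size)
{
	int flags = MSG_SENDPAGE_DECRYPTED | MSG_DONTWAIT;

	return kernel_sendpage(sock, page, offset, size, flags);
}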

include/linux/usb.h
@@ -1457,7 +1457,7 @@ typedef void (*usb_complete_t)(struct urb *);
  * field rather than determining a dma address themselves.
  *
  * Note that transfer_buffer must still be set if the controller
- * does not support DMA (as indicated by bus.uses_dma) and when talking
+ * does not support DMA (as indicated by hcd_uses_dma()) and when talking
  * to root hub. If you have to trasfer between highmem zone and the device
  * on such controller, create a bounce buffer or bail out with an error.
  * If transfer_buffer cannot be set (is in highmem) and the controller is DMA

include/linux/usb/hcd.h
@@ -422,6 +422,9 @@ static inline bool hcd_periodic_completion_in_progress(struct usb_hcd *hcd,
 	return hcd->high_prio_bh.completing_ep == ep;
 }
 
+#define hcd_uses_dma(hcd) \
+	(IS_ENABLED(CONFIG_HAS_DMA) && (hcd)->self.uses_dma)
+
 extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
 extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
 		int status);
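
hcd_uses_dma() folds the CONFIG_HAS_DMA check into the old bus.uses_dma test, so HCD code can guard DMA-only paths without #ifdefs; on !HAS_DMA builds the IS_ENABLED() branch is compiled out entirely. A hedged sketch of the call pattern in a URB mapping path (the function is invented; it loosely mirrors what usb_hcd_map_urb_for_dma() does):

/* Sketch: choose DMA mapping vs. PIO based on the new predicate.
 * example_map_urb() is illustrative, not the real HCD helper. */
static int example_map_urb(struct usb_hcd *hcd, struct urb *urb)
{
	if (!hcd_uses_dma(hcd))
		return 0;	/* PIO controller: leave buffers untouched */

	urb->transfer_dma = dma_map_single(hcd->self.sysdev,
					   urb->transfer_buffer,
					   urb->transfer_buffer_length,
					   DMA_TO_DEVICE);
	return dma_mapping_error(hcd->self.sysdev, urb->transfer_dma) ?
		-EAGAIN : 0;
}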