Merge v5.7-rc5 into driver-core-next

We want the driver core fixes in here and this resolves a merge issue
with drivers/base/dd.c

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2020-05-11 09:00:09 +02:00
commit c8be6af9ef
461 changed files with 4390 additions and 2041 deletions


@@ -65,6 +65,7 @@ struct amba_device {
        struct device dev;
        struct resource res;
        struct clk *pclk;
+       struct device_dma_parameters dma_parms;
        unsigned int periphid;
        unsigned int cid;
        struct amba_cs_uci_id uci;


@@ -219,6 +219,7 @@ struct backing_dev_info {
        wait_queue_head_t wb_waitq;

        struct device *dev;
+       char dev_name[64];
        struct device *owner;

        struct timer_list laptop_mode_wb_timer;


@@ -505,13 +505,6 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
                                  (1 << WB_async_congested));
 }

-extern const char *bdi_unknown_name;
-
-static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
-{
-       if (!bdi || !bdi->dev)
-               return bdi_unknown_name;
-       return dev_name(bdi->dev);
-}
+const char *bdi_dev_name(struct backing_dev_info *bdi);

 #endif /* _LINUX_BACKING_DEV_H */


@@ -329,13 +329,12 @@ struct dma_buf {

 /**
  * struct dma_buf_attach_ops - importer operations for an attachment
- * @move_notify: [optional] notification that the DMA-buf is moving
  *
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
        /**
-        * @move_notify
+        * @move_notify: [optional] notification that the DMA-buf is moving
         *
         * If this callback is provided the framework can avoid pinning the
         * backing store while mappings exists.


@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  * that when repeated an integral number of times, specifies the transfer.
  * A transfer template is specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
        struct dma_chan *chan;
        struct device device;
        int dev_id;
-       atomic_t *idr_ref;
 };

 /**
@@ -835,6 +833,8 @@ struct dma_device {
        int dev_id;
        struct device *dev;
        struct module *owner;
+       struct ida chan_ida;
+       struct mutex chan_mutex;        /* to protect chan_ida */
        u32 src_addr_widths;
        u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
  * dmaengine_synchronize() needs to be called before it is safe to free
  * any memory that is accessed by previously submitted descriptors or before
  * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
  *
  * This function can be called from atomic context as well as from within a
  * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
  *
  * Synchronizes to the DMA channel termination to the current context. When this
  * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
  * with them. Furthermore it is guaranteed that all complete callback functions
  * for a previously submitted descriptor have finished running and it is safe to
  * free resources accessed from within the complete callbacks.
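
[Editor's note: the two kerneldoc blocks above spell out the teardown contract: terminate, then synchronize, then free. A minimal sketch of a driver honoring that contract; struct foo_dev and foo_stop_dma are hypothetical, not part of this merge.]

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver state; only the channel and buffer matter here. */
struct foo_dev {
        struct device *dev;
        struct dma_chan *chan;
        void *buf;
        dma_addr_t buf_dma;
        size_t buf_len;
};

/* Called from process context during driver teardown. */
static void foo_stop_dma(struct foo_dev *fd)
{
        /* Request termination; in-flight callbacks may still be running. */
        dmaengine_terminate_async(fd->chan);

        /* Wait until all descriptors have stopped and every completion
         * callback has finished running. */
        dmaengine_synchronize(fd->chan);

        /* Only now is it safe to free memory the descriptors accessed. */
        dma_free_coherent(fd->dev, fd->buf_len, fd->buf, fd->buf_dma);
}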


@@ -983,7 +983,7 @@ struct file_handle {
        __u32 handle_bytes;
        int handle_type;
        /* file identifier */
-       unsigned char f_handle[0];
+       unsigned char f_handle[];
 };

 static inline struct file *get_file(struct file *f)
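
[Editor's note: the [0]-to-[] change converts a GNU zero-length array into a C99 flexible array member; layout and allocation math are unchanged, but the compiler can now catch misuse such as taking sizeof the array. A sketch of how such a structure is typically sized with the stock struct_size() helper; alloc_handle itself is hypothetical.]

#include <linux/fs.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static struct file_handle *alloc_handle(u32 handle_bytes)
{
        struct file_handle *fh;

        /* sizeof(*fh) + handle_bytes, with overflow checking; the same
         * arithmetic as the old f_handle[0] layout. */
        fh = kzalloc(struct_size(fh, f_handle, handle_bytes), GFP_KERNEL);
        if (fh)
                fh->handle_bytes = handle_bytes;
        return fh;
}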


@@ -55,7 +55,7 @@ LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
 LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
 LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
         struct fs_context *src_sc)
-LSM_HOOK(int, 0, fs_context_parse_param, struct fs_context *fc,
+LSM_HOOK(int, -ENOPARAM, fs_context_parse_param, struct fs_context *fc,
         struct fs_parameter *param)
 LSM_HOOK(int, 0, sb_alloc_security, struct super_block *sb)
 LSM_HOOK(void, LSM_RET_VOID, sb_free_security, struct super_block *sb)
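
[Editor's note: the second LSM_HOOK argument is what security_fs_context_parse_param() returns when no security module claims the parameter. A default of 0 meant "consumed", so mount options could be silently swallowed; -ENOPARAM hands them back to the filesystem. A hedged sketch of the convention from a module's side; foo_fs_context_parse_param and the "foocontext" key are hypothetical.]

#include <linux/errno.h>
#include <linux/fs_context.h>
#include <linux/string.h>

static int foo_fs_context_parse_param(struct fs_context *fc,
                                      struct fs_parameter *param)
{
        /* Not ours: -ENOPARAM lets the filesystem or another LSM try
         * this parameter instead of silently eating it. */
        if (strcmp(param->key, "foocontext") != 0)
                return -ENOPARAM;

        /* ... record the value for this superblock ... */
        return 0;       /* consumed */
}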


@@ -53,9 +53,9 @@ enum mhi_callback {
  * @MHI_CHAIN: Linked transfer
  */
 enum mhi_flags {
-       MHI_EOB,
-       MHI_EOT,
-       MHI_CHAIN,
+       MHI_EOB = BIT(0),
+       MHI_EOT = BIT(1),
+       MHI_CHAIN = BIT(2),
 };

 /**
@@ -335,14 +335,15 @@ struct mhi_controller_config {
  * @syserr_worker: System error worker
  * @state_event: State change event
  * @status_cb: CB function to notify power states of the device (required)
- * @link_status: CB function to query link status of the device (required)
  * @wake_get: CB function to assert device wake (optional)
  * @wake_put: CB function to de-assert device wake (optional)
  * @wake_toggle: CB function to assert and de-assert device wake (optional)
  * @runtime_get: CB function to controller runtime resume (required)
- * @runtimet_put: CB function to decrement pm usage (required)
+ * @runtime_put: CB function to decrement pm usage (required)
  * @map_single: CB function to create TRE buffer
  * @unmap_single: CB function to destroy TRE buffer
+ * @read_reg: Read a MHI register via the physical link (required)
+ * @write_reg: Write a MHI register via the physical link (required)
  * @buffer_len: Bounce buffer length
  * @bounce_buf: Use of bounce buffer
  * @fbc_download: MHI host needs to do complete image transfer (optional)
@@ -417,7 +418,6 @@ struct mhi_controller {
        void (*status_cb)(struct mhi_controller *mhi_cntrl,
                          enum mhi_callback cb);
-       int (*link_status)(struct mhi_controller *mhi_cntrl);
        void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
        void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
        void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
@@ -427,6 +427,10 @@ struct mhi_controller {
                          struct mhi_buf_info *buf);
        void (*unmap_single)(struct mhi_controller *mhi_cntrl,
                             struct mhi_buf_info *buf);
+       int (*read_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+                       u32 *out);
+       void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
+                         u32 val);
        size_t buffer_len;
        bool bounce_buf;
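
[Editor's note: the new read_reg/write_reg members route every MHI register access through the controller driver, so a transport that cannot use plain MMIO can substitute its own accessors. A minimal memory-mapped sketch under that assumption; the foo_* names are hypothetical.]

#include <linux/io.h>
#include <linux/mhi.h>

static int foo_read_reg(struct mhi_controller *mhi_cntrl,
                        void __iomem *addr, u32 *out)
{
        *out = readl(addr);
        return 0;       /* a fallible bus would report errors here */
}

static void foo_write_reg(struct mhi_controller *mhi_cntrl,
                          void __iomem *addr, u32 val)
{
        writel(val, addr);
}

/* Wired up before registering the controller:
 *      mhi_cntrl->read_reg = foo_read_reg;
 *      mhi_cntrl->write_reg = foo_write_reg;
 */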


@@ -1317,11 +1317,13 @@ struct nfs41_impl_id {
        struct nfstime4 date;
 };

+#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
 struct nfs41_bind_conn_to_session_args {
        struct nfs_client *client;
        struct nfs4_sessionid sessionid;
        u32 dir;
        bool use_conn_in_rdma_mode;
+       int retries;
 };

 struct nfs41_bind_conn_to_session_res {


@@ -185,6 +185,7 @@ int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
 void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
                                            u8 sensor_num);

+int cros_ec_sensorhub_ring_allocate(struct cros_ec_sensorhub *sensorhub);
 int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub);
 void cros_ec_sensorhub_ring_remove(void *arg);
 int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,


@@ -25,6 +25,7 @@ struct platform_device {
        bool id_auto;
        struct device dev;
        u64 platform_dma_mask;
+       struct device_dma_parameters dma_parms;
        u32 num_resources;
        struct resource *resource;
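
[Editor's note: as with the amba_device hunk above, embedding dma_parms gives dev.dma_parms per-device storage, so setting DMA segment limits no longer depends on each driver allocating the structure itself. A hedged probe-time sketch; foo_probe is hypothetical.]

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int foo_probe(struct platform_device *pdev)
{
        /* Stores the limit via dev.dma_parms, which now points at the
         * embedded structure; with dma_parms unset this call fails. */
        return dma_set_max_seg_size(&pdev->dev, SZ_64K);
}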


@@ -71,7 +71,13 @@ struct rpc_clnt {
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
        struct dentry *cl_debugfs;      /* debugfs directory */
 #endif
-       struct rpc_xprt_iter cl_xpi;
+       /* cl_work is only needed after cl_xpi is no longer used,
+        * and that are of similar size
+        */
+       union {
+               struct rpc_xprt_iter cl_xpi;
+               struct work_struct cl_work;
+       };
        const struct cred *cl_cred;
 };

@@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task)
                (task->tk_msg.rpc_proc->p_decode != NULL);
 }

+static inline void rpc_task_close_connection(struct rpc_task *task)
+{
+       if (task->tk_xprt)
+               xprt_force_disconnect(task->tk_xprt);
+}
 #endif /* _LINUX_SUNRPC_CLNT_H */
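
[Editor's note: the union works because cl_xpi is only needed while the client is live and cl_work only during teardown, so the two can overlap instead of growing the struct. A sketch of the deferred-release pattern this enables; the helper name and the kfree() ending are illustrative, not necessarily how the SUNRPC code finishes the job.]

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>

/* Runs on a workqueue, in process context, strictly after the last
 * use of cl_xpi, so reusing those bytes as a work_struct is safe. */
static void foo_free_client_work(struct work_struct *work)
{
        struct rpc_clnt *clnt = container_of(work, struct rpc_clnt, cl_work);

        /* ... the slow parts of client cleanup ... */
        kfree(clnt);
}

/* At release time:
 *      INIT_WORK(&clnt->cl_work, foo_free_client_work);
 *      schedule_work(&clnt->cl_work);
 */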


@@ -78,47 +78,6 @@ struct tcp_sack_block {
 #define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
 #define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/

-#if IS_ENABLED(CONFIG_MPTCP)
-struct mptcp_options_received {
-       u64 sndr_key;
-       u64 rcvr_key;
-       u64 data_ack;
-       u64 data_seq;
-       u32 subflow_seq;
-       u16 data_len;
-       u16 mp_capable : 1,
-           mp_join : 1,
-           dss : 1,
-           add_addr : 1,
-           rm_addr : 1,
-           family : 4,
-           echo : 1,
-           backup : 1;
-       u32 token;
-       u32 nonce;
-       u64 thmac;
-       u8 hmac[20];
-       u8 join_id;
-       u8 use_map:1,
-          dsn64:1,
-          data_fin:1,
-          use_ack:1,
-          ack64:1,
-          mpc_map:1,
-          __unused:2;
-       u8 addr_id;
-       u8 rm_id;
-       union {
-               struct in_addr addr;
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-               struct in6_addr addr6;
-#endif
-       };
-       u64 ahmac;
-       u16 port;
-};
-#endif

 struct tcp_options_received {
 /*     PAWS/RTTM data  */
        int ts_recent_stamp;    /* Time we stored ts_recent (for aging) */
@@ -136,9 +95,6 @@ struct tcp_options_received {
        u8 num_sacks;   /* Number of SACK blocks */
        u16 user_mss;   /* mss requested by user in ioctl */
        u16 mss_clamp;  /* Maximal mss, negotiated at connection setup */
-#if IS_ENABLED(CONFIG_MPTCP)
-       struct mptcp_options_received mptcp;
-#endif
 };

 static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -148,13 +104,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
 #if IS_ENABLED(CONFIG_SMC)
        rx_opt->smc_ok = 0;
 #endif
-#if IS_ENABLED(CONFIG_MPTCP)
-       rx_opt->mptcp.mp_capable = 0;
-       rx_opt->mptcp.mp_join = 0;
-       rx_opt->mptcp.add_addr = 0;
-       rx_opt->mptcp.rm_addr = 0;
-       rx_opt->mptcp.dss = 0;
-#endif
 }

 /* This is the max number of SACKS that we'll generate and process. It's safe


@@ -66,7 +66,7 @@ struct tty_buffer {
        int read;
        int flags;
        /* Data points here */
-       unsigned long data[0];
+       unsigned long data[];
 };

 /* Values for .flags field of tty_buffer */


@@ -3,6 +3,8 @@
 #define _LINUX_VIRTIO_NET_H

 #include <linux/if_vlan.h>
+#include <uapi/linux/tcp.h>
+#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>

 static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
@@ -28,17 +30,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        bool little_endian)
 {
        unsigned int gso_type = 0;
+       unsigned int thlen = 0;
+       unsigned int ip_proto;

        if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        gso_type = SKB_GSO_TCPV4;
+                       ip_proto = IPPROTO_TCP;
+                       thlen = sizeof(struct tcphdr);
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        gso_type = SKB_GSO_TCPV6;
+                       ip_proto = IPPROTO_TCP;
+                       thlen = sizeof(struct tcphdr);
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        gso_type = SKB_GSO_UDP;
+                       ip_proto = IPPROTO_UDP;
+                       thlen = sizeof(struct udphdr);
                        break;
                default:
                        return -EINVAL;
@@ -57,16 +67,22 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                if (!skb_partial_csum_set(skb, start, off))
                        return -EINVAL;
+
+               if (skb_transport_offset(skb) + thlen > skb_headlen(skb))
+                       return -EINVAL;
        } else {
                /* gso packets without NEEDS_CSUM do not set transport_offset.
                 * probe and drop if does not match one of the above types.
                 */
                if (gso_type && skb->network_header) {
+                       struct flow_keys_basic keys;
+
                        if (!skb->protocol)
                                virtio_net_hdr_set_proto(skb, hdr);
 retry:
-                       skb_probe_transport_header(skb);
-                       if (!skb_transport_header_was_set(skb)) {
+                       if (!skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+                                                             NULL, 0, 0, 0,
+                                                             0)) {
                                /* UFO does not specify ipv4 or 6: try both */
                                if (gso_type & SKB_GSO_UDP &&
                                    skb->protocol == htons(ETH_P_IP)) {
@@ -75,6 +91,12 @@ retry:
                        }
                        return -EINVAL;
                }
+
+               if (keys.control.thoff + thlen > skb_headlen(skb) ||
+                   keys.basic.ip_proto != ip_proto)
+                       return -EINVAL;
+
+               skb_set_transport_header(skb, keys.control.thoff);
        }
 }


@@ -48,6 +48,7 @@ struct virtio_vsock_pkt {
        u32 len;
        u32 off;
        bool reply;
+       bool tap_delivered;
 };

 struct virtio_vsock_pkt_info {