Linux 5.7-rc4

-----BEGIN PGP SIGNATURE-----
 
 iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl6vPf8eHHRvcnZhbGRz
 QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiG3ooH/0QNxSzms8GgWzm+
 VVg7c293rMJHKDwkzYTP2N308V7WtrSzEYfoN8We70mvSGPIg2Lc4xmTtuNSvF0r
 tyGYHP75WdSEb+eqcsw4diMz6bPGuNYQPLYOsgIsFeCogiSk8U9MAdOrophWA4MZ
 oCmqVupVjimzE5Tye1SVMH5apvEFzapCT5huOkdvuRQ7x6/pQvnHDm/yJg9++ouV
 ICDRjhP4dLspRzoEykmHR/eiNGUFJj4WoPYGLOYX5tvW9CVtwbGuvWAc4aXaUdHx
 4rxD5DQbvdK41lt5yqLnqsAahLDbdwmON0lEbU4U83wYFlJNOOeMs15wDR8rzVVy
 EH6M0uA=
 =UcvZ
 -----END PGP SIGNATURE-----

Merge tag 'v5.7-rc4' into core

Linux 5.7-rc4
Joerg Roedel 2020-05-13 12:01:33 +02:00
commit ec9b40cffd
162 changed files with 1289 additions and 604 deletions


@@ -329,13 +329,12 @@ struct dma_buf {
/**
 * struct dma_buf_attach_ops - importer operations for an attachment
 * @move_notify: [optional] notification that the DMA-buf is moving
 *
 * Attachment operations implemented by the importer.
 */
struct dma_buf_attach_ops {
	/**
	 * @move_notify
	 * @move_notify: [optional] notification that the DMA-buf is moving
	 *
	 * If this callback is provided the framework can avoid pinning the
	 * backing store while mappings exists.
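
For illustration only: a minimal sketch of how an importer might wire up this callback. The my_importer_* names are hypothetical, and the callback signature (taking the dma_buf_attachment pointer) is assumed from mainline dma-buf.h rather than shown in this hunk.

#include <linux/dma-buf.h>

/* Hypothetical importer callback: drop any cached mappings derived from
 * this attachment so the exporter is free to move the backing store;
 * fresh mappings are created on next use.
 */
static void my_importer_move_notify(struct dma_buf_attachment *attach)
{
	/* invalidate cached sg tables / device page tables here */
}

/* Providing .move_notify is what lets the framework skip pinning the
 * backing store while mappings exist.
 */
static const struct dma_buf_attach_ops my_importer_attach_ops = {
	.move_notify = my_importer_move_notify,
};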


@@ -83,9 +83,9 @@ enum dma_transfer_direction {
/**
 * Interleaved Transfer Request
 * ----------------------------
 * A chunk is collection of contiguous bytes to be transfered.
 * A chunk is collection of contiguous bytes to be transferred.
 * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
 * ICGs may or maynot change between chunks.
 * ICGs may or may not change between chunks.
 * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
 * that when repeated an integral number of times, specifies the transfer.
 * A transfer template is specification of a Frame, the number of times
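
As a concrete illustration of the chunk/ICG/frame terms above (not part of this patch): the sketch below describes copying a width x lines sub-rectangle out of a buffer with a larger line stride, using one {chunk, icg} pair per frame. The function name and its parameters are hypothetical, and it assumes a channel that supports interleaved (or memcpy) transfers.

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int queue_rect_copy(struct dma_chan *chan, dma_addr_t dst,
			   dma_addr_t src, size_t width, size_t stride,
			   size_t lines)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_interleaved_template *xt;

	xt = kzalloc(sizeof(*xt) + sizeof(xt->sgl[0]), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* honour the ICG on the source side */
	xt->dst_sgl = true;		/* ... and on the destination side */
	xt->numf = lines;		/* the frame repeats once per line */
	xt->frame_size = 1;		/* a frame is a single {chunk, icg} pair */
	xt->sgl[0].size = width;	/* contiguous bytes per line (the chunk) */
	xt->sgl[0].icg = stride - width; /* gap to the start of the next line */

	desc = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* drivers copy the template during prep */
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}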
@@ -341,13 +341,11 @@ struct dma_chan {
 * @chan: driver channel device
 * @device: sysfs device
 * @dev_id: parent dma_device dev_id
 * @idr_ref: reference count to gate release of dma_device dev_id
 */
struct dma_chan_dev {
	struct dma_chan *chan;
	struct device device;
	int dev_id;
	atomic_t *idr_ref;
};

/**
@@ -835,6 +833,8 @@ struct dma_device {
	int dev_id;
	struct device *dev;
	struct module *owner;
	struct ida chan_ida;
	struct mutex chan_mutex;	/* to protect chan_ida */

	u32 src_addr_widths;
	u32 dst_addr_widths;
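
The new chan_ida/chan_mutex pair suggests per-device channel numbering. The helper below is only an assumption about how an ida guarded by that mutex would typically be used; it is not code from this commit, and my_alloc_chan_id is a hypothetical name.

#include <linux/dmaengine.h>
#include <linux/idr.h>
#include <linux/mutex.h>

/* Hypothetical helper: hand out a unique channel number for a dma_device,
 * with chan_mutex serialising concurrent channel registration/removal
 * against the ida.
 */
static int my_alloc_chan_id(struct dma_device *device)
{
	int id;

	mutex_lock(&device->chan_mutex);
	id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	mutex_unlock(&device->chan_mutex);

	return id;	/* channel number, or negative errno on failure */
}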
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
 * dmaengine_synchronize() needs to be called before it is safe to free
 * any memory that is accessed by previously submitted descriptors or before
 * freeing any resources accessed from within the completion callback of any
 * perviously submitted descriptors.
 * previously submitted descriptors.
 *
 * This function can be called from atomic context as well as from within a
 * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
 *
 * Synchronizes to the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and and it is safe to free the memory assoicated
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe to
 * free resources accessed from within the complete callbacks.
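
Taken together, the two comments above describe the usual teardown pattern: dmaengine_terminate_async() from any context to request termination, then dmaengine_synchronize() from process context before freeing memory or callback resources. A minimal sketch, with struct my_dev and its fields purely hypothetical:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

struct my_dev {				/* hypothetical driver state */
	struct device *dev;
	struct dma_chan *chan;
	void *buf;
	dma_addr_t buf_dma;
	size_t buf_size;
};

/* May run in atomic context (e.g. an IRQ handler): only requests
 * termination, does not wait for it.
 */
static void my_stop_dma(struct my_dev *md)
{
	dmaengine_terminate_async(md->chan);
}

/* Must run in process context: after dmaengine_synchronize() returns, no
 * descriptor or completion callback can still touch md->buf, so it is
 * safe to free.
 */
static void my_teardown_dma(struct my_dev *md)
{
	dmaengine_synchronize(md->chan);
	dma_free_coherent(md->dev, md->buf_size, md->buf, md->buf_dma);
}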


@@ -1317,11 +1317,13 @@ struct nfs41_impl_id {
	struct nfstime4 date;
};

#define MAX_BIND_CONN_TO_SESSION_RETRIES 3
struct nfs41_bind_conn_to_session_args {
	struct nfs_client *client;
	struct nfs4_sessionid sessionid;
	u32 dir;
	bool use_conn_in_rdma_mode;
	int retries;
};

struct nfs41_bind_conn_to_session_res {


@@ -71,7 +71,13 @@ struct rpc_clnt {
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct dentry *cl_debugfs;	/* debugfs directory */
#endif
	struct rpc_xprt_iter cl_xpi;
	/* cl_work is only needed after cl_xpi is no longer used,
	 * and that are of similar size
	 */
	union {
		struct rpc_xprt_iter cl_xpi;
		struct work_struct cl_work;
	};
	const struct cred *cl_cred;
};
@@ -236,4 +242,9 @@ static inline int rpc_reply_expected(struct rpc_task *task)
		(task->tk_msg.rpc_proc->p_decode != NULL);
}

static inline void rpc_task_close_connection(struct rpc_task *task)
{
	if (task->tk_xprt)
		xprt_force_disconnect(task->tk_xprt);
}
#endif /* _LINUX_SUNRPC_CLNT_H */