v5.18 merge window pull request
Patches for the merge window: - Minor bug fixes in mlx5, mthca, pvrdma, rtrs, mlx4, hfi1, hns - Minor cleanups: coding style, useless includes and documentation - Reorganize how multicast processing works in rxe - Replace a red/black tree with xarray in rxe which improves performance - DSCP support and HW address handle re-use in irdma - Simplify the mailbox command handling in hns - Simplify iser now that FMR is eliminated -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEfB7FMLh+8QxL+6i3OG33FX4gmxoFAmI7d8gACgkQOG33FX4g mxq9TRAAm/3rEBtDyVan4Dm3mzq8xeJOTdYtQ29QNWZcwvhStNTo4c29jQJITZ9X Xd+m8VEPWzjkkZ4+hDFM9HhWjOzqDcCCFcbF88WDO0+P1M3HYYUE6AbIcSilAaT2 GIby3+kAnsTOAryrSzHLehYJKUYJuw5NRGsVAPY3r6hz3UECjNb/X1KTWhXaIfNy 2OTqFx5wMQ6hZO8e4a2Wz4bBYAYm95UfK+TgfRqUwhDLCDnkDELvHMPh9pXxJecG xzVg7W7Nv+6GWpUrqtM6W6YpriyjUOfbrtCJmBV3T/EdpiD6lZhKpkX23aLgu2Bi XoMM67aZ0i1Ft2trCIF8GbLaejG6epBkeKkUMMSozKqFP5DjhNbD2f3WAlMI15iW CcdyPS5re1ymPp66UlycTDSkN19wD8LfgQPLJCNM3EV8g8qs9LaKzEPWunBoZmw1 a+QX2zK8J07S21F8iq2sf/Qe1EDdmgOEOf7pmB/A5/PKqgXZPG7lYNYuhIKyFsdL mO+BqWWp0a953JJjsiutxyUCyUd8jtwwxRa0B66oqSQ1jO+xj6XDxjEL8ACuT8Iq 9wFJluTCN6lfxyV8mrSfweT0fQh/W/zVF7EJZHWdKXEzuADxMp9X06wtMQ82ea+k /wgN7DUpBZczRtlqaxt1VSkER75QwAMy/oSpA974+O120QCwLMQ= =aux1 -----END PGP SIGNATURE----- Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma Pull rdma updates from Jason Gunthorpe: - Minor bug fixes in mlx5, mthca, pvrdma, rtrs, mlx4, hfi1, hns - Minor cleanups: coding style, useless includes and documentation - Reorganize how multicast processing works in rxe - Replace a red/black tree with xarray in rxe which improves performance - DSCP support and HW address handle re-use in irdma - Simplify the mailbox command handling in hns - Simplify iser now that FMR is eliminated * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (93 commits) RDMA/nldev: Prevent underflow in nldev_stat_set_counter_dynamic_doit() IB/iser: Fix error flow in case of registration failure 
IB/iser: Generalize map/unmap dma tasks IB/iser: Use iser_fr_desc as registration context IB/iser: Remove iser_reg_data_sg helper function RDMA/rxe: Use standard names for ref counting RDMA/rxe: Replace red-black trees by xarrays RDMA/rxe: Shorten pool names in rxe_pool.c RDMA/rxe: Move max_elem into rxe_type_info RDMA/rxe: Replace obj by elem in declaration RDMA/rxe: Delete _locked() APIs for pool objects RDMA/rxe: Reverse the sense of RXE_POOL_NO_ALLOC RDMA/rxe: Replace mr by rkey in responder resources RDMA/rxe: Fix ref error in rxe_av.c RDMA/hns: Use the reserved loopback QPs to free MR before destroying MPT RDMA/irdma: Add support for address handle re-use RDMA/qib: Fix typos in comments RDMA/mlx5: Fix memory leak in error flow for subscribe event routine Revert "RDMA/core: Fix ib_qp_usecnt_dec() called when error" RDMA/rxe: Remove useless argument for update_state() ...
This commit is contained in:
commit
2dacc1e57b
106 changed files with 2881 additions and 2886 deletions
|
|
@ -32,7 +32,6 @@
|
|||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/if_arp.h> /* For ARPHRD_xxx */
|
||||
#include <linux/module.h>
|
||||
#include <net/rtnetlink.h>
|
||||
#include "ipoib.h"
|
||||
|
||||
|
|
|
|||
|
|
@ -30,7 +30,6 @@
|
|||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched/signal.h>
|
||||
|
||||
#include <linux/init.h>
|
||||
|
|
|
|||
|
|
@ -203,12 +203,12 @@ struct iser_reg_resources;
|
|||
*
|
||||
* @sge: memory region sg element
|
||||
* @rkey: memory region remote key
|
||||
* @mem_h: pointer to registration context (FMR/Fastreg)
|
||||
* @desc: pointer to fast registration context
|
||||
*/
|
||||
struct iser_mem_reg {
|
||||
struct ib_sge sge;
|
||||
u32 rkey;
|
||||
void *mem_h;
|
||||
struct ib_sge sge;
|
||||
u32 rkey;
|
||||
struct iser_fr_desc *desc;
|
||||
};
|
||||
|
||||
enum iser_desc_type {
|
||||
|
|
@ -531,13 +531,12 @@ int iser_post_recvm(struct iser_conn *iser_conn,
|
|||
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
|
||||
|
||||
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum iser_data_dir iser_dir,
|
||||
enum dma_data_direction dma_dir);
|
||||
|
||||
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum dma_data_direction dir);
|
||||
enum iser_data_dir iser_dir,
|
||||
enum dma_data_direction dma_dir);
|
||||
|
||||
int iser_initialize_task_headers(struct iscsi_task *task,
|
||||
struct iser_tx_desc *tx_desc);
|
||||
|
|
|
|||
|
|
@ -52,30 +52,17 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
|
|||
struct iser_mem_reg *mem_reg;
|
||||
int err;
|
||||
struct iser_ctrl *hdr = &iser_task->desc.iser_header;
|
||||
struct iser_data_buf *buf_in = &iser_task->data[ISER_DIR_IN];
|
||||
|
||||
err = iser_dma_map_task_data(iser_task,
|
||||
buf_in,
|
||||
ISER_DIR_IN,
|
||||
DMA_FROM_DEVICE);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (scsi_prot_sg_count(iser_task->sc)) {
|
||||
struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];
|
||||
|
||||
err = iser_dma_map_task_data(iser_task,
|
||||
pbuf_in,
|
||||
ISER_DIR_IN,
|
||||
DMA_FROM_DEVICE);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
|
||||
if (err) {
|
||||
iser_err("Failed to set up Data-IN RDMA\n");
|
||||
return err;
|
||||
goto out_err;
|
||||
}
|
||||
mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
|
||||
|
||||
|
|
@ -88,6 +75,10 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
|
|||
(unsigned long long)mem_reg->sge.addr);
|
||||
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Register user buffer memory and initialize passive rdma
|
||||
|
|
@ -106,28 +97,16 @@ static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
|
|||
struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
|
||||
|
||||
err = iser_dma_map_task_data(iser_task,
|
||||
buf_out,
|
||||
ISER_DIR_OUT,
|
||||
DMA_TO_DEVICE);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (scsi_prot_sg_count(iser_task->sc)) {
|
||||
struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];
|
||||
|
||||
err = iser_dma_map_task_data(iser_task,
|
||||
pbuf_out,
|
||||
ISER_DIR_OUT,
|
||||
DMA_TO_DEVICE);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
|
||||
buf_out->data_len == imm_sz);
|
||||
if (err != 0) {
|
||||
if (err) {
|
||||
iser_err("Failed to register write cmd RDMA mem\n");
|
||||
return err;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
|
||||
|
|
@ -154,6 +133,10 @@ static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
|
|||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* creates a new tx descriptor and adds header regd buffer */
|
||||
|
|
@ -619,13 +602,13 @@ static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
|
|||
struct iser_fr_desc *desc;
|
||||
|
||||
if (iser_task->dir[ISER_DIR_IN]) {
|
||||
desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
|
||||
desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
|
||||
if (unlikely(iser_inv_desc(desc, rkey)))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (iser_task->dir[ISER_DIR_OUT]) {
|
||||
desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
|
||||
desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
|
||||
if (unlikely(iser_inv_desc(desc, rkey)))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
@ -740,27 +723,16 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
|
|||
|
||||
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
|
||||
{
|
||||
int prot_count = scsi_prot_sg_count(iser_task->sc);
|
||||
|
||||
if (iser_task->dir[ISER_DIR_IN]) {
|
||||
iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
|
||||
iser_dma_unmap_task_data(iser_task,
|
||||
&iser_task->data[ISER_DIR_IN],
|
||||
iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
|
||||
DMA_FROM_DEVICE);
|
||||
if (prot_count)
|
||||
iser_dma_unmap_task_data(iser_task,
|
||||
&iser_task->prot[ISER_DIR_IN],
|
||||
DMA_FROM_DEVICE);
|
||||
}
|
||||
|
||||
if (iser_task->dir[ISER_DIR_OUT]) {
|
||||
iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
|
||||
iser_dma_unmap_task_data(iser_task,
|
||||
&iser_task->data[ISER_DIR_OUT],
|
||||
iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
|
||||
DMA_TO_DEVICE);
|
||||
if (prot_count)
|
||||
iser_dma_unmap_task_data(iser_task,
|
||||
&iser_task->prot[ISER_DIR_OUT],
|
||||
DMA_TO_DEVICE);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -30,7 +30,6 @@
|
|||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mm.h>
|
||||
|
|
@ -71,10 +70,10 @@ static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
|
|||
}
|
||||
|
||||
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum iser_data_dir iser_dir,
|
||||
enum dma_data_direction dma_dir)
|
||||
{
|
||||
struct iser_data_buf *data = &iser_task->data[iser_dir];
|
||||
struct ib_device *dev;
|
||||
|
||||
iser_task->dir[iser_dir] = 1;
|
||||
|
|
@ -85,17 +84,40 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
|
|||
iser_err("dma_map_sg failed!!!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (scsi_prot_sg_count(iser_task->sc)) {
|
||||
struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
|
||||
|
||||
pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
|
||||
if (unlikely(pdata->dma_nents == 0)) {
|
||||
iser_err("protection dma_map_sg failed!!!\n");
|
||||
goto out_unmap;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
out_unmap:
|
||||
ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
|
||||
struct iser_data_buf *data,
|
||||
enum dma_data_direction dir)
|
||||
enum iser_data_dir iser_dir,
|
||||
enum dma_data_direction dma_dir)
|
||||
{
|
||||
struct iser_data_buf *data = &iser_task->data[iser_dir];
|
||||
struct ib_device *dev;
|
||||
|
||||
dev = iser_task->iser_conn->ib_conn.device->ib_device;
|
||||
ib_dma_unmap_sg(dev, data->sg, data->size, dir);
|
||||
ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
|
||||
|
||||
if (scsi_prot_sg_count(iser_task->sc)) {
|
||||
struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
|
||||
|
||||
ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
|
||||
}
|
||||
}
|
||||
|
||||
static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
|
||||
|
|
@ -130,7 +152,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
|
|||
struct iser_fr_desc *desc;
|
||||
struct ib_mr_status mr_status;
|
||||
|
||||
desc = reg->mem_h;
|
||||
desc = reg->desc;
|
||||
if (!desc)
|
||||
return;
|
||||
|
||||
|
|
@ -147,8 +169,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
|
|||
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
|
||||
&mr_status);
|
||||
}
|
||||
iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->mem_h);
|
||||
reg->mem_h = NULL;
|
||||
iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
|
||||
reg->desc = NULL;
|
||||
}
|
||||
|
||||
static void iser_set_dif_domain(struct scsi_cmnd *sc,
|
||||
|
|
@ -327,40 +349,26 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int iser_reg_data_sg(struct iscsi_iser_task *task,
|
||||
struct iser_data_buf *mem,
|
||||
struct iser_fr_desc *desc, bool use_dma_key,
|
||||
struct iser_mem_reg *reg)
|
||||
{
|
||||
struct iser_device *device = task->iser_conn->ib_conn.device;
|
||||
|
||||
if (use_dma_key)
|
||||
return iser_reg_dma(device, mem, reg);
|
||||
|
||||
return iser_fast_reg_mr(task, mem, &desc->rsc, reg);
|
||||
}
|
||||
|
||||
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
|
||||
enum iser_data_dir dir,
|
||||
bool all_imm)
|
||||
{
|
||||
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
|
||||
struct iser_device *device = ib_conn->device;
|
||||
struct iser_data_buf *mem = &task->data[dir];
|
||||
struct iser_mem_reg *reg = &task->rdma_reg[dir];
|
||||
struct iser_fr_desc *desc = NULL;
|
||||
struct iser_fr_desc *desc;
|
||||
bool use_dma_key;
|
||||
int err;
|
||||
|
||||
use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
|
||||
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
|
||||
if (use_dma_key)
|
||||
return iser_reg_dma(device, mem, reg);
|
||||
|
||||
if (!use_dma_key) {
|
||||
desc = iser_reg_desc_get_fr(ib_conn);
|
||||
reg->mem_h = desc;
|
||||
}
|
||||
|
||||
desc = iser_reg_desc_get_fr(ib_conn);
|
||||
if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
|
||||
err = iser_reg_data_sg(task, mem, desc, use_dma_key, reg);
|
||||
err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
|
||||
if (unlikely(err))
|
||||
goto err_reg;
|
||||
} else {
|
||||
|
|
@ -372,11 +380,12 @@ int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
|
|||
desc->sig_protected = true;
|
||||
}
|
||||
|
||||
reg->desc = desc;
|
||||
|
||||
return 0;
|
||||
|
||||
err_reg:
|
||||
if (desc)
|
||||
iser_reg_desc_put_fr(ib_conn, desc);
|
||||
iser_reg_desc_put_fr(ib_conn, desc);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -32,7 +32,6 @@
|
|||
* SOFTWARE.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
|
|
@ -905,7 +904,7 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
|
|||
enum iser_data_dir cmd_dir, sector_t *sector)
|
||||
{
|
||||
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
|
||||
struct iser_fr_desc *desc = reg->mem_h;
|
||||
struct iser_fr_desc *desc = reg->desc;
|
||||
unsigned long sector_size = iser_task->sc->device->sector_size;
|
||||
struct ib_mr_status mr_status;
|
||||
int ret;
|
||||
|
|
|
|||
|
|
@ -50,7 +50,6 @@
|
|||
* netdev functionality.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/if_vlan.h>
|
||||
#include <linux/crc32.h>
|
||||
|
||||
|
|
|
|||
|
|
@ -156,8 +156,7 @@ static DEVICE_ATTR_RW(mpath_policy);
|
|||
static ssize_t add_path_show(struct device *dev,
|
||||
struct device_attribute *attr, char *page)
|
||||
{
|
||||
return sysfs_emit(
|
||||
page,
|
||||
return sysfs_emit(page,
|
||||
"Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
|
||||
attr->attr.name);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -297,6 +297,7 @@ static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
|
|||
return changed;
|
||||
}
|
||||
|
||||
static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
|
||||
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
|
||||
{
|
||||
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
|
||||
|
|
@ -304,16 +305,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
|
|||
if (rtrs_clt_change_state_from_to(clt_path,
|
||||
RTRS_CLT_CONNECTED,
|
||||
RTRS_CLT_RECONNECTING)) {
|
||||
struct rtrs_clt_sess *clt = clt_path->clt;
|
||||
unsigned int delay_ms;
|
||||
|
||||
/*
|
||||
* Normal scenario, reconnect if we were successfully connected
|
||||
*/
|
||||
delay_ms = clt->reconnect_delay_sec * 1000;
|
||||
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
|
||||
msecs_to_jiffies(delay_ms +
|
||||
prandom_u32() % RTRS_RECONNECT_SEED));
|
||||
queue_work(rtrs_wq, &clt_path->err_recovery_work);
|
||||
} else {
|
||||
/*
|
||||
* Error can happen just on establishing new connection,
|
||||
|
|
@ -917,7 +909,7 @@ static inline void path_it_deinit(struct path_it *it)
|
|||
{
|
||||
struct list_head *skip, *tmp;
|
||||
/*
|
||||
* The skip_list is used only for the MIN_INFLIGHT policy.
|
||||
* The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
|
||||
* We need to remove paths from it, so that next IO can insert
|
||||
* paths (->mp_skip_entry) into a skip_list again.
|
||||
*/
|
||||
|
|
@ -1511,6 +1503,22 @@ static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
|
|||
static void rtrs_clt_reconnect_work(struct work_struct *work);
|
||||
static void rtrs_clt_close_work(struct work_struct *work);
|
||||
|
||||
static void rtrs_clt_err_recovery_work(struct work_struct *work)
|
||||
{
|
||||
struct rtrs_clt_path *clt_path;
|
||||
struct rtrs_clt_sess *clt;
|
||||
int delay_ms;
|
||||
|
||||
clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
|
||||
clt = clt_path->clt;
|
||||
delay_ms = clt->reconnect_delay_sec * 1000;
|
||||
rtrs_clt_stop_and_destroy_conns(clt_path);
|
||||
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
|
||||
msecs_to_jiffies(delay_ms +
|
||||
prandom_u32() %
|
||||
RTRS_RECONNECT_SEED));
|
||||
}
|
||||
|
||||
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
|
||||
const struct rtrs_addr *path,
|
||||
size_t con_num, u32 nr_poll_queues)
|
||||
|
|
@ -1562,6 +1570,7 @@ static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
|
|||
clt_path->state = RTRS_CLT_CONNECTING;
|
||||
atomic_set(&clt_path->connected_cnt, 0);
|
||||
INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
|
||||
INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
|
||||
INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
|
||||
rtrs_clt_init_hb(clt_path);
|
||||
|
||||
|
|
@ -2326,6 +2335,7 @@ static void rtrs_clt_close_work(struct work_struct *work)
|
|||
|
||||
clt_path = container_of(work, struct rtrs_clt_path, close_work);
|
||||
|
||||
cancel_work_sync(&clt_path->err_recovery_work);
|
||||
cancel_delayed_work_sync(&clt_path->reconnect_dwork);
|
||||
rtrs_clt_stop_and_destroy_conns(clt_path);
|
||||
rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
|
||||
|
|
@ -2638,7 +2648,6 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
|
|||
{
|
||||
struct rtrs_clt_path *clt_path;
|
||||
struct rtrs_clt_sess *clt;
|
||||
unsigned int delay_ms;
|
||||
int err;
|
||||
|
||||
clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
|
||||
|
|
@ -2655,8 +2664,6 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
|
|||
}
|
||||
clt_path->reconnect_attempts++;
|
||||
|
||||
/* Stop everything */
|
||||
rtrs_clt_stop_and_destroy_conns(clt_path);
|
||||
msleep(RTRS_RECONNECT_BACKOFF);
|
||||
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
|
||||
err = init_path(clt_path);
|
||||
|
|
@ -2669,11 +2676,7 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
|
|||
reconnect_again:
|
||||
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
|
||||
clt_path->stats->reconnects.fail_cnt++;
|
||||
delay_ms = clt->reconnect_delay_sec * 1000;
|
||||
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
|
||||
msecs_to_jiffies(delay_ms +
|
||||
prandom_u32() %
|
||||
RTRS_RECONNECT_SEED));
|
||||
queue_work(rtrs_wq, &clt_path->err_recovery_work);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -2908,6 +2911,7 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
|
|||
&old_state);
|
||||
if (changed) {
|
||||
clt_path->reconnect_attempts = 0;
|
||||
rtrs_clt_stop_and_destroy_conns(clt_path);
|
||||
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
|
||||
}
|
||||
if (changed || old_state == RTRS_CLT_RECONNECTING) {
|
||||
|
|
|
|||
|
|
@ -134,6 +134,7 @@ struct rtrs_clt_path {
|
|||
struct rtrs_clt_io_req *reqs;
|
||||
struct delayed_work reconnect_dwork;
|
||||
struct work_struct close_work;
|
||||
struct work_struct err_recovery_work;
|
||||
unsigned int reconnect_attempts;
|
||||
bool established;
|
||||
struct rtrs_rbuf *rbufs;
|
||||
|
|
|
|||
|
|
@ -479,7 +479,6 @@ static int rtrs_str_to_sockaddr(const char *addr, size_t len,
|
|||
*/
|
||||
int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
|
||||
{
|
||||
|
||||
switch (addr->sa_family) {
|
||||
case AF_IB:
|
||||
return scnprintf(buf, len, "gid:%pI6",
|
||||
|
|
|
|||
|
|
@ -92,6 +92,9 @@ enum srp_iu_type {
|
|||
};
|
||||
|
||||
/*
|
||||
* RDMA adapter in the initiator system.
|
||||
*
|
||||
* @dev_list: List of RDMA ports associated with this RDMA adapter (srp_host).
|
||||
* @mr_page_mask: HCA memory registration page mask.
|
||||
* @mr_page_size: HCA memory registration page size.
|
||||
* @mr_max_size: Maximum size in bytes of a single FR registration request.
|
||||
|
|
@ -109,6 +112,12 @@ struct srp_device {
|
|||
bool use_fast_reg;
|
||||
};
|
||||
|
||||
/*
|
||||
* One port of an RDMA adapter in the initiator system.
|
||||
*
|
||||
* @target_list: List of connected target ports (struct srp_target_port).
|
||||
* @target_lock: Protects @target_list.
|
||||
*/
|
||||
struct srp_host {
|
||||
struct srp_device *srp_dev;
|
||||
u8 port;
|
||||
|
|
@ -183,7 +192,7 @@ struct srp_rdma_ch {
|
|||
};
|
||||
|
||||
/**
|
||||
* struct srp_target_port
|
||||
* struct srp_target_port - RDMA port in the SRP target system
|
||||
* @comp_vector: Completion vector used by the first RDMA channel created for
|
||||
* this target port.
|
||||
*/
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue