/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

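/*
 * Memory registration dispatch tables: each device picks either the
 * fast registration flavor or the FMR flavor at setup time (see
 * iser_assign_reg_ops() below), and the rest of the code calls through
 * device->reg_ops without caring which one is active.
 */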
static struct iser_reg_ops fastreg_ops = {
	.alloc_reg_res	= iser_alloc_fastreg_pool,
	.free_reg_res	= iser_free_fastreg_pool,
	.reg_rdma_mem	= iser_reg_rdma_mem_fastreg,
	.unreg_rdma_mem	= iser_unreg_mem_fastreg,
	.reg_desc_get	= iser_reg_desc_get_fr,
	.reg_desc_put	= iser_reg_desc_put_fr,
};

static struct iser_reg_ops fmr_ops = {
	.alloc_reg_res	= iser_alloc_fmr_pool,
	.free_reg_res	= iser_free_fmr_pool,
	.reg_rdma_mem	= iser_reg_rdma_mem_fmr,
	.unreg_rdma_mem	= iser_unreg_mem_fmr,
	.reg_desc_get	= iser_reg_desc_get_fmr,
	.reg_desc_put	= iser_reg_desc_put_fmr,
};

int iser_assign_reg_ops(struct iser_device *device)
{
	struct ib_device_attr *dev_attr = &device->dev_attr;

	/* Assign function handles - based on FMR support */
	if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
	    device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
		iser_info("FMR supported, using FMR for registration\n");
		device->reg_ops = &fmr_ops;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		iser_info("FastReg supported, using FastReg for registration\n");
		device->reg_ops = &fastreg_ops;
	} else {
		iser_err("IB device does not support FMRs nor FastRegs, can't register memory\n");
		return -EINVAL;
	}

	return 0;
}

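/*
 * Free the bounce buffer pages and the bounce scatterlist, and restore
 * the task's original scatterlist.
 */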
static void
iser_free_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(data->sg, sg, data->size, count)
		__free_page(sg_page(sg));

	kfree(data->sg);

	data->sg = data->orig_sg;
	data->size = data->orig_size;
	data->orig_sg = NULL;
	data->orig_size = 0;
}

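/*
 * Allocate a page-aligned bounce scatterlist large enough to hold
 * data->data_len bytes. On success the original sg/size are saved in
 * orig_sg/orig_size and replaced by the bounce scatterlist.
 */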
static int
iser_alloc_bounce_sg(struct iser_data_buf *data)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned long length = data->data_len;
	int i = 0, nents = DIV_ROUND_UP(length, PAGE_SIZE);

	sg = kcalloc(nents, sizeof(*sg), GFP_ATOMIC);
	if (!sg)
		goto err;

	sg_init_table(sg, nents);
	while (length) {
		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_ATOMIC);
		if (!page)
			goto err;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	data->orig_sg = data->sg;
	data->orig_size = data->size;
	data->sg = sg;
	data->size = nents;

	return 0;

err:
	for (; i > 0; i--)
		__free_page(sg_page(&sg[i - 1]));
	kfree(sg);

	return -ENOMEM;
}

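/*
 * Copy between the original and bounce scatterlists, walking both
 * lists in lock-step; to_buffer selects the copy direction
 * (original -> bounce for writes, bounce -> original for reads).
 */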
static void
iser_copy_bounce(struct iser_data_buf *data, bool to_buffer)
{
	struct scatterlist *osg, *bsg = data->sg;
	void *oaddr, *baddr;
	unsigned int left = data->data_len;
	unsigned int bsg_off = 0;
	int i;

	for_each_sg(data->orig_sg, osg, data->orig_size, i) {
		unsigned int copy_len, osg_off = 0;

		oaddr = kmap_atomic(sg_page(osg)) + osg->offset;
		copy_len = min(left, osg->length);
		while (copy_len) {
			unsigned int len = min(copy_len, bsg->length - bsg_off);

			baddr = kmap_atomic(sg_page(bsg)) + bsg->offset;
			if (to_buffer)
				memcpy(baddr + bsg_off, oaddr + osg_off, len);
			else
				memcpy(oaddr + osg_off, baddr + bsg_off, len);

			kunmap_atomic(baddr - bsg->offset);
			osg_off += len;
			bsg_off += len;
			left -= len;
			copy_len -= len;

			if (bsg_off >= bsg->length) {
				bsg = sg_next(bsg);
				bsg_off = 0;
			}
		}
		kunmap_atomic(oaddr - osg->offset);
	}
}

static inline void
iser_copy_from_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, false);
}

static inline void
iser_copy_to_bounce(struct iser_data_buf *data)
{
	iser_copy_bounce(data, true);
}

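/*
 * Pop a fast registration descriptor from the connection pool.
 */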
struct iser_fr_desc *
iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);

	return desc;
}

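/*
 * Return a fast registration descriptor to the connection pool.
 */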
void
iser_reg_desc_put_fr(struct ib_conn *ib_conn,
		     struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);
}

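/*
 * FMR descriptor get/put: FMR registration keeps no per-task state in
 * the descriptor, so "get" just peeks at the first pool entry and
 * "put" is a no-op.
 */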
struct iser_fr_desc *
iser_reg_desc_get_fmr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;

	return list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
}

void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
		      struct iser_fr_desc *desc)
{
}

/**
 * iser_start_rdma_unaligned_sg - Bounce an RDMA-unaligned scatterlist:
 * allocate a bounce buffer, copy the payload into it for writes, and
 * DMA map the bounce scatterlist in place of the original one.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
	int rc;

	rc = iser_alloc_bounce_sg(data);
	if (rc) {
		iser_err("Failed to allocate bounce for data len %lu\n",
			 data->data_len);
		return rc;
	}

	if (cmd_dir == ISER_DIR_OUT)
		iser_copy_to_bounce(data);

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size,
					(cmd_dir == ISER_DIR_OUT) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (!data->dma_nents) {
		iser_err("Got dma_nents %d, something went wrong...\n",
			 data->dma_nents);
		rc = -ENOMEM;
		goto err;
	}

	return 0;
err:
	iser_free_bounce_sg(data);
	return rc;
}

/**
 * iser_finalize_rdma_unaligned_sg - DMA unmap the bounce buffer, copy
 * the data back for reads, and restore the original scatterlist.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;

	ib_dma_unmap_sg(dev, data->sg, data->size,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN)
		iser_copy_from_bounce(data);

	iser_free_bounce_sg(data);
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)

/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where several fragments of the same page are present in the SG as
 * consecutive elements, and it handles a single entry SG.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = data->sg;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page  = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}

/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers that is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sg, *sgl, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = data->sg;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i + 1;

	if (unlikely(ret_len != data->dma_nents))
		iser_warn("rdma alignment violation (%d/%d aligned)\n",
			  ret_len, data->dma_nents);

	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

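/*
 * DMA map a task's scatterlist and record the transfer direction on
 * the task. Returns 0 on success, -EINVAL if the mapping failed.
 */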
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data,
			      enum dma_data_direction dir)
{
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}

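/*
 * A single DMA entry needs no registration: it can be described
 * directly with the device's global DMA memory region, so just fill
 * the registration descriptor from the first scatterlist entry.
 */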
static int
iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
	     struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->mr->lkey;
	reg->rkey = device->mr->rkey;
	reg->sge.addr = ib_sg_dma_address(device->ib_device, &sg[0]);
	reg->sge.length = ib_sg_dma_len(device->ib_device, &sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

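/*
 * Called when the scatterlist violates the RDMA alignment requirements:
 * unmap the original list and switch the task to a freshly allocated
 * bounce buffer (copying the payload first for writes).
 */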
static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *mem,
			      enum iser_data_dir cmd_dir)
{
	struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;

	iscsi_conn->fmr_unalign_cnt++;

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, device->ib_device);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem,
				 (cmd_dir == ISER_DIR_OUT) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE);

	/* allocate a bounce buffer; if we are writing, copy the
	 * unaligned scatterlist into it, then DMA map the copy */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}

/**
 * iser_fast_reg_fmr - Registers physical memory using an FMR pool
 *
 * returns: 0 on success, errno code on failure
 */
static int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
			     struct iser_data_buf *mem,
			     struct iser_reg_resources *rsc,
			     struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_page_vec *page_vec = rsc->page_vec;
	struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
	struct ib_pool_fmr *fmr;
	int ret, plen;

	plen = iser_sg_to_page_vec(mem, device->ib_device,
				   page_vec->pages,
				   &page_vec->offset,
				   &page_vec->data_size);
	page_vec->length = plen;
	if (plen * SIZE_4K < page_vec->data_size) {
		iser_err("page vec too short to hold this SG\n");
		iser_data_buf_dump(mem, device->ib_device);
		iser_dump_page_vec(page_vec);
		return -EINVAL;
	}

	fmr = ib_fmr_pool_map_phys(fmr_pool,
				   page_vec->pages,
				   page_vec->length,
				   page_vec->pages[0]);
	if (IS_ERR(fmr)) {
		ret = PTR_ERR(fmr);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", ret);
		return ret;
	}

	reg->sge.lkey = fmr->fmr->lkey;
	reg->rkey = fmr->fmr->rkey;
	reg->sge.addr = page_vec->pages[0] + page_vec->offset;
	reg->sge.length = page_vec->data_size;
	reg->mem_h = fmr;

	return 0;
}

/**
 * Unregister (previously registered using FMR) memory.
 * If memory is non-FMR does nothing.
 */
void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
			enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	int ret;

	if (!reg->mem_h)
		return;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);

	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);

	reg->mem_h = NULL;
}

void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_device *device = iser_task->iser_conn->ib_conn.device;
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];

	if (!reg->mem_h)
		return;

	device->reg_ops->reg_desc_put(&iser_task->iser_conn->ib_conn,
				      reg->mem_h);
	reg->mem_h = NULL;
}

/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg;
	int aligned_len;
	int err;
	int i;

	mem_reg = &iser_task->rdma_reg[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		return iser_reg_dma(device, mem, mem_reg);
	} else { /* use FMR for multiple dma entries */
		struct iser_fr_desc *desc;

		desc = device->reg_ops->reg_desc_get(ib_conn);
		err = iser_fast_reg_fmr(iser_task, mem, &desc->rsc, mem_reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 desc->rsc.page_vec->data_size,
				 desc->rsc.page_vec->length,
				 desc->rsc.page_vec->offset);
			for (i = 0; i < desc->rsc.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)desc->rsc.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}

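/*
 * Fill a T10-DIF signature domain from the SCSI command's protection
 * settings; the application tag handling is currently hard coded.
 */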
static void
iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs,
		    struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc);
	/*
	 * At the moment we hard code those, but in the future
	 * we will take them from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, sig_attrs, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

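/* Translate the SCSI protection check flags into the iSER check mask. */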
static inline void
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= ISER_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= ISER_CHECK_GUARD;
}

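/*
 * Build a local-invalidate work request for @mr and bump its rkey so
 * the next fast registration uses a fresh key.
 */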
static void
iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

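/*
 * Register a signature-enabled memory region covering the data (and,
 * when present, protection) registrations, so the HCA can generate or
 * verify T10-DIF guards on the wire.
 */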
static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct iser_pi_context *pi_ctx,
		struct iser_mem_reg *data_reg,
		struct iser_mem_reg *prot_reg,
		struct iser_mem_reg *sig_reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);

	if (!pi_ctx->sig_mr_valid) {
		iser_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &data_reg->sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = &prot_reg->sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	pi_ctx->sig_mr_valid = 0;

	sig_reg->sge.lkey = pi_ctx->sig_mr->lkey;
	sig_reg->rkey = pi_ctx->sig_mr->rkey;
	sig_reg->sge.addr = 0;
	sig_reg->sge.length = scsi_transfer_length(iser_task->sc);

	iser_dbg("sig_sge: lkey: 0x%x, rkey: 0x%x, addr: 0x%llx, length: %u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

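/*
 * Register @mem with a fast registration work request, chaining a
 * local invalidate first when the MR still carries a previous
 * registration. Falls back to the global DMA MR for single-entry
 * scatterlists.
 */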
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, offset, size, plen;

	/* if there is a single dma entry, the dma mr suffices */
	if (mem->dma_nents == 1)
		return iser_reg_dma(device, mem, reg);

	mr = rsc->mr;
	frpl = rsc->frpl;

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!rsc->mr_valid) {
		iser_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE  |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	rsc->mr_valid = 0;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = frpl->page_list[0] + offset;
	reg->sge.length = size;

	return ret;
}

/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using a Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_mem_reg *mem_reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = NULL;
	int err, aligned_len;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, mem, cmd_dir);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
	}

	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		desc = device->reg_ops->reg_desc_get(ib_conn);
		mem_reg->mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, mem,
			       desc ? &desc->rsc : NULL, mem_reg);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct iser_mem_reg prot_reg;

		memset(&prot_reg, 0, sizeof(prot_reg));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, mem,
							 cmd_dir);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
			}

			err = iser_fast_reg_mr(iser_task, mem,
					       &desc->pi_ctx->rsc, &prot_reg);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc->pi_ctx, mem_reg,
				      &prot_reg, mem_reg);
		if (err) {
			iser_err("Failed to register signature mr\n");
			goto err_reg;
		}
		desc->pi_ctx->sig_protected = 1;
	}

	return 0;

err_reg:
	if (desc)
		device->reg_ops->reg_desc_put(ib_conn, desc);

	return err;
}