This will allow us to unify the memory registration code path between
the various methods which vary by the device capabilities. This change
will make it easier and less intrusive to remove fmr_pools from the
code when we want to.
The reason we use a single descriptor is to avoid taking a
redundant spinlock when working with FMRs.
We also change the signature of iser_reg_page_vec to make it match
iser_fast_reg_mr (and the future indirect registration method).
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Adir Lev <adirl@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
* struct iser_reg_resources - Fast registration resources
*
* @mr: memory region
- * @frpl: fast reg page list
+ * @fmr_pool: pool of fmrs
+ * @frpl: fast reg page list used by frwrs
+ * @page_vec: fast reg page list used by fmr pool
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
* @mr_valid: is mr valid indicator
*/
struct iser_reg_resources {
- struct ib_mr *mr;
- struct ib_fast_reg_page_list *frpl;
+ union {
+ struct ib_mr *mr;
+ struct ib_fmr_pool *fmr_pool;
+ };
+ union {
+ struct ib_fast_reg_page_list *frpl;
+ struct iser_page_vec *page_vec;
+ };
/**
* struct iser_fr_pool: connection fast registration pool
*
/**
* struct iser_fr_pool: connection fast registration pool
*
+ * @list: list of fastreg descriptors
* @lock: protects fmr/fastreg pool
* @lock: protects fmr/fastreg pool
- * @union.fmr:
- * @pool: FMR pool for fast registrations
- * @page_vec: fast reg page list to hold mapped commands pages
- * used for registration
- * @union.fastreg:
- * @pool: Fast registration descriptors pool for fast
- * registrations
- * @pool_size: Size of pool
+ * @size: size of the pool
- spinlock_t lock;
- union {
- struct {
- struct ib_fmr_pool *pool;
- struct iser_page_vec *page_vec;
- } fmr;
- struct {
- struct list_head pool;
- int pool_size;
- } fastreg;
- };
+ struct list_head list;
+ spinlock_t lock;
+ int size;
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
- desc = list_first_entry(&fr_pool->fastreg.pool,
+ desc = list_first_entry(&fr_pool->list,
struct iser_fr_desc, list);
list_del(&desc->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
struct iser_fr_desc, list);
list_del(&desc->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
- list_add(&desc->list, &fr_pool->fastreg.pool);
+ list_add(&desc->list, &fr_pool->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
static
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
static
int iser_reg_page_vec(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
- struct iser_page_vec *page_vec,
+ struct iser_reg_resources *rsc,
struct iser_mem_reg *mem_reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_mem_reg *mem_reg)
{
struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
- struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_device *device = ib_conn->device;
struct iser_device *device = ib_conn->device;
+ struct iser_page_vec *page_vec = rsc->page_vec;
+ struct ib_fmr_pool *fmr_pool = rsc->fmr_pool;
struct ib_pool_fmr *fmr;
int ret, plen;
struct ib_pool_fmr *fmr;
int ret, plen;
- fmr = ib_fmr_pool_map_phys(fr_pool->fmr.pool,
+ fmr = ib_fmr_pool_map_phys(fmr_pool,
page_vec->pages,
page_vec->length,
page_vec->pages[0]);
page_vec->pages,
page_vec->length,
page_vec->pages[0]);
if (mem->dma_nents == 1) {
return iser_reg_dma(device, mem, mem_reg);
} else { /* use FMR for multiple dma entries */
if (mem->dma_nents == 1) {
return iser_reg_dma(device, mem, mem_reg);
} else { /* use FMR for multiple dma entries */
- err = iser_reg_page_vec(iser_task, mem,
- fr_pool->fmr.page_vec, mem_reg);
+ struct iser_fr_desc *desc;
+
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+ err = iser_reg_page_vec(iser_task, mem, &desc->rsc, mem_reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
- fr_pool->fmr.page_vec->data_size,
- fr_pool->fmr.page_vec->length,
- fr_pool->fmr.page_vec->offset);
- for (i = 0; i < fr_pool->fmr.page_vec->length; i++)
+ desc->rsc.page_vec->data_size,
+ desc->rsc.page_vec->length,
+ desc->rsc.page_vec->offset);
+ for (i = 0; i < desc->rsc.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
iser_err("page_vec[%d] = 0x%llx\n", i,
- (unsigned long long)fr_pool->fmr.page_vec->pages[i]);
+ (unsigned long long)desc->rsc.page_vec->pages[i]);
struct iser_device *device = ib_conn->device;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_page_vec *page_vec;
struct iser_device *device = ib_conn->device;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_page_vec *page_vec;
+ struct iser_fr_desc *desc;
struct ib_fmr_pool *fmr_pool;
struct ib_fmr_pool_param params;
struct ib_fmr_pool *fmr_pool;
struct ib_fmr_pool_param params;
+ INIT_LIST_HEAD(&fr_pool->list);
spin_lock_init(&fr_pool->lock);
spin_lock_init(&fr_pool->lock);
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
page_vec = kmalloc(sizeof(*page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
GFP_KERNEL);
page_vec = kmalloc(sizeof(*page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
GFP_KERNEL);
- if (!page_vec)
- return ret;
+ if (!page_vec) {
+ ret = -ENOMEM;
+ goto err_frpl;
+ }
page_vec->pages = (u64 *)(page_vec + 1);
page_vec->pages = (u64 *)(page_vec + 1);
if (IS_ERR(fmr_pool)) {
ret = PTR_ERR(fmr_pool);
iser_err("FMR allocation failed, err %d\n", ret);
if (IS_ERR(fmr_pool)) {
ret = PTR_ERR(fmr_pool);
iser_err("FMR allocation failed, err %d\n", ret);
- fr_pool->fmr.page_vec = page_vec;
- fr_pool->fmr.pool = fmr_pool;
+ desc->rsc.page_vec = page_vec;
+ desc->rsc.fmr_pool = fmr_pool;
+ list_add(&desc->list, &fr_pool->list);
+err_frpl:
+ kfree(desc);
+
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
+ struct iser_fr_desc *desc;
+
+ desc = list_first_entry(&fr_pool->list,
+ struct iser_fr_desc, list);
+ list_del(&desc->list);
iser_info("freeing conn %p fmr pool %p\n",
iser_info("freeing conn %p fmr pool %p\n",
- ib_conn, fr_pool->fmr.pool);
+ ib_conn, desc->rsc.fmr_pool);
- ib_destroy_fmr_pool(fr_pool->fmr.pool);
- fr_pool->fmr.pool = NULL;
- kfree(fr_pool->fmr.page_vec);
- fr_pool->fmr.page_vec = NULL;
+ ib_destroy_fmr_pool(desc->rsc.fmr_pool);
+ kfree(desc->rsc.page_vec);
+ kfree(desc);
struct iser_fr_desc *desc;
int i, ret;
struct iser_fr_desc *desc;
int i, ret;
- INIT_LIST_HEAD(&fr_pool->fastreg.pool);
+ INIT_LIST_HEAD(&fr_pool->list);
spin_lock_init(&fr_pool->lock);
spin_lock_init(&fr_pool->lock);
- fr_pool->fastreg.pool_size = 0;
for (i = 0; i < cmds_max; i++) {
desc = iser_create_fastreg_desc(device->ib_device, device->pd,
ib_conn->pi_support);
for (i = 0; i < cmds_max; i++) {
desc = iser_create_fastreg_desc(device->ib_device, device->pd,
ib_conn->pi_support);
- list_add_tail(&desc->list, &fr_pool->fastreg.pool);
- fr_pool->fastreg.pool_size++;
+ list_add_tail(&desc->list, &fr_pool->list);
+ fr_pool->size++;
struct iser_fr_desc *desc, *tmp;
int i = 0;
struct iser_fr_desc *desc, *tmp;
int i = 0;
- if (list_empty(&fr_pool->fastreg.pool))
+ if (list_empty(&fr_pool->list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
- list_for_each_entry_safe(desc, tmp, &fr_pool->fastreg.pool, list) {
+ list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
list_del(&desc->list);
iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
list_del(&desc->list);
iser_free_reg_res(&desc->rsc);
if (desc->pi_ctx)
- if (i < fr_pool->fastreg.pool_size)
iser_warn("pool still has %d regions registered\n",
iser_warn("pool still has %d regions registered\n",
- fr_pool->fastreg.pool_size - i);