RDS/IB: Always use PAGE_SIZE for FMR page size
author Andy Grover <andy.grover@oracle.com>
Fri, 17 Jul 2009 13:13:33 +0000 (13:13 +0000)
committer David S. Miller <davem@davemloft.net>
Mon, 20 Jul 2009 15:03:13 +0000 (08:03 -0700)
While FMRs allow significant flexibility in the page size they can use, we
really just want the FMR page size to match the CPU page size. Roland says we
can count on this always being supported, so this simplifies things.

Signed-off-by: Andy Grover <andy.grover@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
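
The net effect is easiest to see in rds_ib_map_fmr(): each DMA segment is now
split into CPU-page-sized chunks with PAGE_SHIFT/PAGE_MASK instead of the
per-device fmr_page_shift/fmr_page_mask fields. Below is a minimal user-space
sketch of that page-splitting step, assuming 4K pages; split_into_pages(),
struct seg, and the SKETCH_* macros are illustrative names for this sketch
only, not kernel API or part of the patch.

/*
 * Minimal user-space sketch (not kernel code) of the page-splitting logic
 * rds_ib_map_fmr() performs after this change: every DMA segment is chopped
 * into CPU-page-sized FMR pages.  The 4K page size and all SKETCH_* / seg /
 * split_into_pages names are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12                              /* assume 4K CPU pages */
#define SKETCH_PAGE_SIZE  ((uint64_t)1 << SKETCH_PAGE_SHIFT)
#define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

struct seg { uint64_t dma_addr; uint64_t dma_len; };

/* Returns the number of page addresses written, or -1 on an unmappable list. */
static int split_into_pages(const struct seg *scat, int nsegs,
			    uint64_t *pages, int max_pages)
{
	int i, page_cnt = 0;
	uint64_t j;

	for (i = 0; i < nsegs; i++) {
		/* Mirror the checks in the patch: only the first segment may
		 * start mid-page and only the last may end mid-page. */
		if ((scat[i].dma_addr & ~SKETCH_PAGE_MASK) && i > 0)
			return -1;
		if (((scat[i].dma_addr + scat[i].dma_len) & ~SKETCH_PAGE_MASK) &&
		    i < nsegs - 1)
			return -1;

		/* One page address per CPU page covered by this segment. */
		for (j = 0; j < scat[i].dma_len; j += SKETCH_PAGE_SIZE) {
			if (page_cnt >= max_pages)
				return -1;
			pages[page_cnt++] =
				(scat[i].dma_addr & SKETCH_PAGE_MASK) + j;
		}
	}
	return page_cnt;
}

int main(void)
{
	/* First segment starts mid-page; second is page-aligned. */
	struct seg scat[] = { { 0x10000800, 0x1800 }, { 0x20000000, 0x2000 } };
	uint64_t pages[8];
	int n = split_into_pages(scat, 2, pages, 8);

	for (int i = 0; i < n; i++)
		printf("page %d: 0x%llx\n", i, (unsigned long long)pages[i]);
	return 0;
}

The resulting array of page-aligned addresses corresponds to the dma_pages
list that the real code hands to ib_map_phys_fmr(), now always in units of
the CPU page size.
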
net/rds/ib.c
net/rds/ib.h
net/rds/ib_rdma.c

diff --git a/net/rds/ib.c b/net/rds/ib.c
index 27abdd3df2cc95d11d71949bfdd45626c7faa21d..868559ac42d77e1924b01204f204888121fc4f33 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -85,9 +85,6 @@ void rds_ib_add_one(struct ib_device *device)
        rds_ibdev->max_wrs = dev_attr->max_qp_wr;
        rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
 
-       rds_ibdev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
-       rds_ibdev->fmr_page_size  = 1 << rds_ibdev->fmr_page_shift;
-       rds_ibdev->fmr_page_mask  = ~((u64) rds_ibdev->fmr_page_size - 1);
        rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
        rds_ibdev->max_fmrs = dev_attr->max_fmr ?
                        min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
diff --git a/net/rds/ib.h b/net/rds/ib.h
index c0de7af6cf60556a4a8ba2e5fdcd91dd34462948..1378b854cac0d15ce27611a88f8acfa776ed46bd 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -159,9 +159,6 @@ struct rds_ib_device {
        struct ib_pd            *pd;
        struct ib_mr            *mr;
        struct rds_ib_mr_pool   *mr_pool;
-       int                     fmr_page_shift;
-       int                     fmr_page_size;
-       u64                     fmr_page_mask;
        unsigned int            fmr_max_remaps;
        unsigned int            max_fmrs;
        int                     max_sge;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 81033af930207116e5eca3369725a845c8110873..ef3ab5b7283e9ce908b0cc4633728707b1a9999b 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -211,7 +211,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 
        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
-       pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift;
+       pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
 
        /* We never allow more than max_items MRs to be allocated.
@@ -349,13 +349,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-               if (dma_addr & ~rds_ibdev->fmr_page_mask) {
+               if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
-               if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) {
+               if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
@@ -365,7 +365,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                len += dma_len;
        }
 
-       page_cnt += len >> rds_ibdev->fmr_page_shift;
+       page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size)
                return -EINVAL;
 
@@ -378,9 +378,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
 
-               for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
+               for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
-                               (dma_addr & rds_ibdev->fmr_page_mask) + j;
+                               (dma_addr & PAGE_MASK) + j;
        }
 
        ret = ib_map_phys_fmr(ibmr->fmr,