bnx2x: Memory allocation
Author:     Eilon Greenstein <eilong@broadcom.com>
AuthorDate: Wed, 13 Aug 2008 22:51:07 +0000 (15:51 -0700)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Wed, 13 Aug 2008 23:02:34 +0000 (16:02 -0700)
Memory allocation
- The CQE ring was allocated at the maximum size even on chips that do
  not support it. It is now allocated according to the chip type, which
  saves memory (see the sketch after this list)
- The rx_page_ring was not freed on driver unload; it is now released in
  bnx2x_free_mem() (see the note after the diff)
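
In outline, the sizing fix replaces the hard-coded E1H maximum with a value
derived from the chip type. The sketch below is illustrative only and is not
part of the patch: bnx2x_max_agg_queues() is a hypothetical helper, while
CHIP_IS_E1() and the ETH_MAX_AGGREGATION_QUEUES_* macros are the ones used in
the diff.

    /* Illustrative sketch only -- not driver code.  The helper name is
     * hypothetical; the macros match the ones in the patch below. */
    static inline int bnx2x_max_agg_queues(struct bnx2x *bp)
    {
            /* E1 supports fewer aggregation queues than E1H, so size the
             * TPA pool (and its cleanup) from the chip type instead of
             * always assuming the larger E1H maximum. */
            return CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
                                    ETH_MAX_AGGREGATION_QUEUES_E1H;
    }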

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/bnx2x_main.c

index c8b61788abb8105ce89153f084b2960b9073435c..b8cdce21c5d9030e4b90046ea01d7ed2bc5043a7 100644
@@ -4252,7 +4252,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 static void bnx2x_init_rx_rings(struct bnx2x *bp)
 {
        int func = BP_FUNC(bp);
-       u16 ring_prod, cqe_ring_prod = 0;
+       int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+                                             ETH_MAX_AGGREGATION_QUEUES_E1H;
+       u16 ring_prod, cqe_ring_prod;
        int i, j;
 
        bp->rx_buf_use_size = bp->dev->mtu;
@@ -4266,9 +4268,9 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
                   bp->dev->mtu + ETH_OVREHEAD);
 
                for_each_queue(bp, j) {
-                       for (i = 0; i < ETH_MAX_AGGREGATION_QUEUES_E1H; i++) {
-                               struct bnx2x_fastpath *fp = &bp->fp[j];
+                       struct bnx2x_fastpath *fp = &bp->fp[j];
 
+                       for (i = 0; i < max_agg_queues; i++) {
                                fp->tpa_pool[i].skb =
                                   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
                                if (!fp->tpa_pool[i].skb) {
@@ -4348,8 +4350,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
                                BNX2X_ERR("disabling TPA for queue[%d]\n", j);
                                /* Cleanup already allocated elements */
                                bnx2x_free_rx_sge_range(bp, fp, ring_prod);
-                               bnx2x_free_tpa_pool(bp, fp,
-                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
+                               bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
                                fp->disable_tpa = 1;
                                ring_prod = 0;
                                break;
@@ -5772,6 +5773,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
                               NUM_RCQ_BD);
 
                /* SGE ring */
+               BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
                               bnx2x_fp(bp, i, rx_sge_mapping),
                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
@@ -5949,7 +5951,8 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
                        dev_kfree_skb(skb);
                }
                if (!fp->disable_tpa)
-                       bnx2x_free_tpa_pool(bp, fp,
+                       bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+                                           ETH_MAX_AGGREGATION_QUEUES_E1 :
                                            ETH_MAX_AGGREGATION_QUEUES_E1H);
        }
 }
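
For context on the rx_page_ring fix: the array released by the new
BNX2X_FREE() line is the per-SGE bookkeeping that is set up on the load path.
The pairing is sketched below; the allocation-side lines are paraphrased from
bnx2x_alloc_mem() of the same era and the exact size expression is an
assumption, while the free line is the one added by the hunk above.

    /* Load path (bnx2x_alloc_mem(), paraphrased -- the size expression is
     * an assumption): one sw_rx_page bookkeeping entry per SGE. */
    BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
                sizeof(struct sw_rx_page) * NUM_RX_SGE);

    /* Unload path (bnx2x_free_mem()): the matching release added by this
     * patch, so the bookkeeping array is no longer leaked. */
    BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));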