fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
- fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
+ fcoe_init1.num_tasks = hba->max_tasks;
fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
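With this hunk the INIT1 KWQE carries a per-adapter task count instead of the compile-time BNX2FC_MAX_TASKS, so hba->max_tasks (and the hba->max_xid used by the range checks below) has to be populated before the INIT1 KWQE is built. A minimal sketch of that setup at hba-create time, assuming the underlying cnic device reports how many FCoE exchanges it supports (the max_fcoe_exchanges field name is an assumption, as is deriving max_xid as the last zero-based XID):

	/* Sketch only, not part of the patch: fill the per-hba limits up front.
	 * The cnic field name below is an assumption; XIDs are zero-based, so
	 * the largest valid XID is one less than the task count.
	 */
	hba->max_tasks = cnic->max_fcoe_exchanges;
	hba->max_xid = hba->max_tasks - 1;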
	BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
		err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
- if (xid > BNX2FC_MAX_XID) {
+ if (xid > hba->max_xid) {
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
xid);
		goto ret_err_rqe;
	}
BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);
- if (xid > BNX2FC_MAX_XID) {
+ if (xid > hba->max_xid) {
BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
goto ret_warn_rqe;
}
spin_lock_bh(&tgt->tgt_lock);
xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
- if (xid >= BNX2FC_MAX_TASKS) {
+ if (xid >= hba->max_tasks) {
printk(KERN_ERR PFX "ERROR:xid out of range\n");
spin_unlock_bh(&tgt->tgt_lock);
		return;
	}
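Note the two flavours of bounds check in this patch: the unsolicited error/warn RQE paths above compare the XID against hba->max_xid with '>', while the completion path here masks the task id out of the WQE (FCOE_PEND_WQ_CQE_TASK_ID) and compares against hba->max_tasks with '>='. Both reject the same XIDs only when max_xid is the last valid zero-based id, i.e. max_tasks - 1. A hypothetical helper (not in the driver) that makes the equivalence explicit:

	/* Hypothetical helper, not in the driver: accepts exactly XIDs
	 * 0 .. max_tasks - 1, assuming the hba was set up with
	 * max_xid == max_tasks - 1.
	 */
	static inline bool bnx2fc_xid_valid(const struct bnx2fc_hba *hba, u16 xid)
	{
		return xid <= hba->max_xid;	/* same set as xid < hba->max_tasks */
	}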
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
	int rc = 0;
struct regpair *task_ctx_bdt;
dma_addr_t addr;
+ int task_ctx_arr_sz;
int i;
/*
* Allocate task_ctx which is an array of pointers pointing to
* a page containing 32 task contexts
*/
- hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
+ task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
+ hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
GFP_KERNEL);
if (!hba->task_ctx) {
printk(KERN_ERR PFX "unable to allocate task context array\n");
/*
* Allocate task_ctx_dma which is an array of dma addresses
*/
- hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
+ hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
sizeof(dma_addr_t)), GFP_KERNEL);
if (!hba->task_ctx_dma) {
printk(KERN_ERR PFX "unable to alloc context mapping array\n");
}
task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
- for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ for (i = 0; i < task_ctx_arr_sz; i++) {
hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
PAGE_SIZE,
return 0;
out3:
- for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ for (i = 0; i < task_ctx_arr_sz; i++) {
if (hba->task_ctx[i]) {
			dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
					  hba->task_ctx[i],
					  hba->task_ctx_dma[i]);
		}
	}
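Sizing follows directly from the comment above: one page holds 32 task contexts (BNX2FC_TASKS_PER_PAGE), so the driver needs max_tasks / BNX2FC_TASKS_PER_PAGE context pages, and one task_ctx[] pointer, one task_ctx_dma[] address and one BD-table entry per page. For illustration only, a hypothetical max_tasks of 2048 gives 2048 / 32 = 64 pages; each page's bus address is recorded in the BD table split into 32-bit halves (the regpair lo/hi layout is assumed from the shared HSI headers):

	/* Sketch of the BD-table fill for one context page; not part of the
	 * patch.  'addr', 'task_ctx_bdt', 'hba' and 'i' are the locals already
	 * visible in bnx2fc_setup_task_ctx() above.
	 */
	addr = (u64) hba->task_ctx_dma[i];
	task_ctx_bdt->hi = cpu_to_le32((u64) addr >> 32);
	task_ctx_bdt->lo = cpu_to_le32((u32) addr);
	task_ctx_bdt++;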
void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
{
+ int task_ctx_arr_sz;
int i;
if (hba->task_ctx_bd_tbl) {
hba->task_ctx_bd_tbl = NULL;
}
+ task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
if (hba->task_ctx) {
- for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
+ for (i = 0; i < task_ctx_arr_sz; i++) {
if (hba->task_ctx[i]) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba->task_ctx[i],