static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- unsigned long flags;
u32 reg;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
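/* Only touch the hardware write pointer when a restock actually
 * marked it dirty; need_update is set and cleared under rxq->lock. */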
if (rxq->need_update == 0)
goto exit_unlock;
rxq->need_update = 0;
exit_unlock:
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
/*
 * If the device isn't enabled - no need to try to add buffers...
 */
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
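/* Advance the write index with a mask rather than a modulo;
 * RX_QUEUE_MASK assumes the ring size is a power of two. */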
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
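/* Example: while write sits anywhere in 8..15, write & ~0x7 stays at 8,
 * so the device is only notified again once write crosses 16 (assuming
 * write_actual holds the last value handed to the device). */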
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
return;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
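/* While the free pool is still above the low watermark an allocation
 * failure is not critical, so suppress the page-allocation warning. */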
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
DMA_FROM_DEVICE);
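/* If the DMA mapping failed, put the rxb back on rx_used and release
 * the page so neither is leaked. */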
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
/* and also 256 byte aligned! */
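/* DMA_BIT_MASK(8) is 0xff, so the BUG_ON fires if any of the low
 * eight address bits are set, i.e. the buffer is not 256-byte aligned. */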
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
}
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i, err;
- unsigned long flags;
if (!rxq->bd) {
err = iwl_pcie_rx_alloc(trans);
return err;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
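/* Start the ring from scratch: both indices back to zero and the
 * receive buffer status area cleared. */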
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans);
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- unsigned long flags;
/* if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
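/* Stop the replenish worker before freeing the ring so it cannot
 * touch memory that is about to be released. */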
cancel_work_sync(&trans_pcie->rx_replenish);
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
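/* A non-NULL page means the upper layer did not steal it, so it can be
 * remapped and put back on rx_free; otherwise the empty rxb goes back
 * to rx_used to be refilled by the allocator. */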
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*