1 /******************************************************************************
3 * Copyright(c) 2009-2010 Realtek Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
26 * Larry Finger <Larry.Finger@lwfinger.net>
28 *****************************************************************************/
36 #include <linux/export.h>
/* PCI vendor IDs of recognized host bridges, indexed by the
 * PCI_BRIDGE_VENDOR_* enum (initializer values elided in this listing). */
38 static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
/* mac80211 AC (access-category) index -> hardware TX queue number,
 * used by _rtl_mac_to_hwqueue() and _rtl_pci_tx_chk_waitq(). */
45 static const u8 ac_to_hwq[] = {
/*
 * Map an outgoing skb to its hardware TX queue index.
 * Beacon, management/control and (on 8192SE) NULL-function frames take
 * special-case paths (their return statements are elided in this listing);
 * ordinary data frames are translated through ac_to_hwq[] using the
 * skb's mac80211 queue mapping.
 */
52 u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
55 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
56 u16 fc = rtl_get_fc(skb);
57 u8 queue_index = skb_get_queue_mapping(skb);
/* Beacons go to the dedicated beacon queue. */
59 if (unlikely(ieee80211_is_beacon(fc)))
61 if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
/* 8192SE special-cases NULL-function frames. */
63 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
64 if (ieee80211_is_nullfunc(fc))
/* Default path: AC -> HW queue lookup table. */
67 return ac_to_hwq[queue_index];
70 /* Update PCI-dependent default power-save settings.
 * Derives ppsc->reg_rfps_level and ppsc->b_support_aspm from the
 * per-chip constants in rtlpci (const_pci_aspm, const_hwsw_rfoff_d3,
 * const_support_pciaspm) and the detected PCI bridge vendor. */
71 static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
75 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
76 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
77 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
/* Start from a clean slate before ORing in the level bits below. */
80 ppsc->reg_rfps_level = 0;
81 ppsc->b_support_aspm = 0;
83 /* Update PCI ASPM setting (case labels elided in this listing). */
84 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
85 switch (rtlpci->const_pci_aspm) {
91 /* ASPM dynamically enabled/disabled. */
92 ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
96 /* ASPM with Clock Request dynamically enabled/disabled. */
97 ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
98 RT_RF_OFF_LEVL_CLK_REQ);
103 * Always enable ASPM and Clock Req
104 * from initialization to halt.
106 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
107 ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
108 RT_RF_OFF_LEVL_CLK_REQ);
113 * Always enable ASPM without Clock Req
114 * from initialization to halt.
116 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
117 RT_RF_OFF_LEVL_CLK_REQ);
118 ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
122 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
124 /* Update Radio-OFF setting (case labels elided in this listing). */
125 switch (rtlpci->const_hwsw_rfoff_d3) {
127 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
128 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
132 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
133 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
134 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
138 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
142 /* Set HW definition to determine if it supports ASPM. */
143 switch (rtlpci->const_support_pciaspm) {
145 /* ASPM not supported. */
146 bool b_support_aspm = false;
147 ppsc->b_support_aspm = b_support_aspm;
152 bool b_support_aspm = true;
153 bool b_support_backdoor = true;
154 ppsc->b_support_aspm = b_support_aspm;
156 /*if(priv->oem_id == RT_CID_TOSHIBA &&
157 !priv->ndis_adapter.amd_l1_patch)
158 b_support_backdoor = false; */
160 ppsc->b_support_backdoor = b_support_backdoor;
165 /* ASPM value set by chipset: only trust Intel bridges here. */
166 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
167 bool b_support_aspm = true;
168 ppsc->b_support_aspm = b_support_aspm;
172 RT_TRACE(COMP_ERR, DBG_EMERG,
173 ("switch case not process \n"));
177 /* Toshiba ASPM issue: Toshiba platforms set ASPM themselves,
178 * so the driver should not touch ASPM in that case. */
179 pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
180 if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
182 ppsc->b_support_aspm = false;
/*
 * Write the device-side ASPM setting into PCI config offset 0x80.
 * The value is adjusted first for non-8192SE chips (the adjustment line
 * is elided in this listing). Returns a bool result flag.
 */
185 static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw,
188 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
189 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
190 bool bresult = false;
192 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
/* Device config-space offset 0x80 holds the ASPM control value. */
195 pci_write_config_byte(rtlpci->pdev, 0x80, value);
200 /* Toggle PCIe clock request: write 0x01 to enable clk request,
 * 0x00 to disable it (PCI config offset 0x81). */
201 static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
203 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
204 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
205 bool bresult = false;
207 pci_write_config_byte(rtlpci->pdev, 0x81, value);
/* 8192SE takes an extra step here (elided in this listing). */
210 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
216 /* Disable RTL8192SE ASPM and disable the PCI bridge's ASPM.
 * Clears the L0s/L1 enable bits (BIT0/BIT1 of the link control value)
 * on both the device and the bridge, and drops clock request first
 * if it was enabled. No-op if ASPM is unsupported or the bridge
 * vendor is unknown. */
217 static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
219 struct rtl_priv *rtlpriv = rtl_priv(hw);
220 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
221 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
222 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
223 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
224 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
225 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
226 /*Retrieve original configuration settings. */
227 u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
228 u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
229 pcibridge_linkctrlreg;
/* Nothing to do when the platform/chip combo doesn't support ASPM. */
232 if (!ppsc->b_support_aspm)
235 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
236 RT_TRACE(COMP_POWER, DBG_TRACE,
237 ("PCI(Bridge) UNKNOWN.\n"));
/* Turn off clock request before disabling ASPM itself. */
242 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
243 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
244 _rtl_pci_switch_clk_req(hw, 0x0);
248 /* Dummy config read so the device is in L0 state after an I/O. */
250 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
253 /* Clear the L0s (BIT0) and L1 (BIT1) enable bits. */
254 aspmlevel |= BIT(0) | BIT(1);
255 linkctrl_reg &= ~aspmlevel;
256 pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
258 _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
261 /*4 Disable Pci Bridge ASPM */
262 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
263 pcicfg_addrport + (num4bytes << 2));
264 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
271 *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
272 *power saving. We should follow the sequence: enable
273 *RTL8192SE first, then enable the PCI bridge's ASPM,
274 *or the system will show a bluescreen.
276 static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
278 struct rtl_priv *rtlpriv = rtl_priv(hw);
279 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
280 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
281 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
282 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
283 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
284 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
286 u8 u_pcibridge_aspmsetting;
287 u8 u_device_aspmsetting;
/* Bail out when ASPM is unsupported or bridge vendor is unknown. */
289 if (!ppsc->b_support_aspm)
292 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
293 RT_TRACE(COMP_POWER, DBG_TRACE,
294 ("PCI(Bridge) UNKNOWN.\n"));
298 /*4 Enable Pci Bridge ASPM */
299 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
300 pcicfg_addrport + (num4bytes << 2));
/* Combine the bridge's saved link-control value with the chip's
 * preferred host-side ASPM setting. */
302 u_pcibridge_aspmsetting =
303 pcipriv->ndis_adapter.pcibridge_linkctrlreg |
304 rtlpci->const_hostpci_aspm_setting;
/* Intel bridges: keep L0s (BIT0) disabled. */
306 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
307 u_pcibridge_aspmsetting &= ~BIT(0);
309 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
311 RT_TRACE(COMP_INIT, DBG_LOUD,
312 ("PlatformEnableASPM(): Write reg[%x] = %x\n",
313 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
314 u_pcibridge_aspmsetting));
318 /*Get ASPM level (with/without Clock Req) */
319 aspmlevel = rtlpci->const_devicepci_aspm_setting;
320 u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
322 /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
323 /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
325 u_device_aspmsetting |= aspmlevel;
327 _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
/* Finally (re-)enable clock request if the PS level asks for it. */
329 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
330 _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
331 RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
332 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
/*
 * Probe whether the AMD bridge has the L1 patch applied.
 * Writes 0xA0 to bridge config offset 0xE0, reads it back to confirm the
 * write stuck, then checks BIT23 of the dword at offset 0xE4.
 * Returns true when the patch is present (final return elided here).
 */
337 static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
339 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
340 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
346 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
347 pcicfg_addrport + 0xE0);
348 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
350 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
351 pcicfg_addrport + 0xE0);
352 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
/* Readback matched, so offset 0xE4 is valid to inspect. */
354 if (offset_e0 == 0xA0) {
355 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
356 pcicfg_addrport + 0xE4);
357 rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
358 if (offset_e4 & BIT(23))
/*
 * Find a "buddy" rtl_priv: another adapter instance in the global priv
 * list that sits on the same PCI bus and device but a different PCI
 * function (i.e. the other half of a dual-mac/dual-function card).
 * On success, stores the match through @buddy_priv and returns true.
 */
365 bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
366 struct rtl_priv **buddy_priv)
368 struct rtl_priv *rtlpriv = rtl_priv(hw);
369 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
370 bool b_find_buddy_priv = false;
371 struct rtl_priv *temp_priv = NULL;
372 struct rtl_pci_priv *temp_pcipriv = NULL;
374 if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
375 list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list,
379 (struct rtl_pci_priv *)temp_priv->priv;
380 RT_TRACE(COMP_INIT, DBG_LOUD,
381 (("pcipriv->ndis_adapter.funcnumber %x \n"),
382 pcipriv->ndis_adapter.funcnumber));
383 RT_TRACE(COMP_INIT, DBG_LOUD,
384 (("temp_pcipriv->ndis_adapter.funcnumber %x \n"),
385 temp_pcipriv->ndis_adapter.funcnumber));
/* Same bus + same device + different function == our buddy. */
387 if ((pcipriv->ndis_adapter.busnumber ==
388 temp_pcipriv->ndis_adapter.busnumber) &&
389 (pcipriv->ndis_adapter.devnumber ==
390 temp_pcipriv->ndis_adapter.devnumber) &&
391 (pcipriv->ndis_adapter.funcnumber !=
392 temp_pcipriv->ndis_adapter.funcnumber)) {
393 b_find_buddy_priv = true;
400 RT_TRACE(COMP_INIT, DBG_LOUD,
401 (("b_find_buddy_priv %d \n"), b_find_buddy_priv));
403 if (b_find_buddy_priv)
404 *buddy_priv = temp_priv;
406 return b_find_buddy_priv;
/*
 * Read the PCI bridge's PCIe Link Control register (at PCIe capability
 * offset + 0x10) via raw config-space port I/O and cache the value in
 * pcipriv->ndis_adapter.pcibridge_linkctrlreg for later ASPM handling.
 */
409 void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
411 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
412 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
413 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
/* Dword index of the Link Control register within config space. */
417 num4bbytes = (capabilityoffset + 0x10) / 4;
419 /*Read Link Control Register */
420 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
421 pcicfg_addrport + (num4bbytes << 2));
422 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
424 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
/*
 * Cache the device's own PCIe Link Control register value and apply a
 * couple of vendor-specific config tweaks (offsets 0x98 and 0x70f;
 * the intermediate bit manipulation is elided in this listing).
 * NOTE(review): pci_find_capability() may return 0 if the PCIe
 * capability is absent; the read below would then hit offset
 * PCI_EXP_LNKCTL — worth confirming against the full source.
 */
427 static void rtl_pci_parse_configuration(struct pci_dev *pdev,
428 struct ieee80211_hw *hw)
430 struct rtl_priv *rtlpriv = rtl_priv(hw);
431 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
437 /*Link Control Register */
438 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
439 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
440 pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
442 RT_TRACE(COMP_INIT, DBG_TRACE,
443 ("Link Control Register =%x\n",
444 pcipriv->ndis_adapter.linkctrl_reg));
/* Read-modify-write of vendor-specific config bytes. */
446 pci_read_config_byte(pdev, 0x98, &tmp);
448 pci_write_config_byte(pdev, 0x98, tmp);
451 pci_write_config_byte(pdev, 0x70f, tmp);
/*
 * Initialize ASPM state: compute the default PS levels, then, when the
 * "always ASPM" policy is selected, enable ASPM (and clock request)
 * immediately and record that in the PS level flags.
 */
454 static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
456 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
458 _rtl_pci_update_default_setting(hw);
460 if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
461 /*Always enable ASPM & Clock Req. */
462 rtl_pci_enable_aspm(hw);
463 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
/*
 * Wire up the register-access vtable for the PCI interface: async
 * writes and sync reads go through the pci_* accessor functions.
 */
468 static void _rtl_pci_io_handler_init(struct device *dev,
469 struct ieee80211_hw *hw)
471 struct rtl_priv *rtlpriv = rtl_priv(hw);
473 rtlpriv->io.dev = dev;
475 rtlpriv->io.write8_async = pci_write8_async;
476 rtlpriv->io.write16_async = pci_write16_async;
477 rtlpriv->io.write32_async = pci_write32_async;
479 rtlpriv->io.read8_sync = pci_read8_sync;
480 rtlpriv->io.read16_sync = pci_read16_sync;
481 rtlpriv->io.read32_sync = pci_read32_sync;
/*
 * Fill tcb_desc->empkt_len[]/empkt_num with the on-air lengths of the
 * AMPDU-eligible skbs still waiting in the per-TID wait queue, so the
 * early-mode TX header can describe the packets that will follow.
 * Each length includes FCS plus the crypto ICV overhead.
 * Holds locks.waitq_lock while walking the queue.
 */
485 static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
487 struct rtl_tcb_desc *tcb_desc,
490 struct rtl_priv *rtlpriv = rtl_priv(hw);
491 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
492 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
493 u8 additionlen = FCS_LEN;
494 struct sk_buff *next_skb;
496 /* ICV overhead: open is 4, WEP/TKIP is 8, AES is 12. */
497 if (info->control.hw_key)
498 additionlen += info->control.hw_key->icv_len;
500 /* The maximum early-mode skb count is 6. */
501 tcb_desc->empkt_num = 0;
502 spin_lock_bh(&rtlpriv->locks.waitq_lock);
503 skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
504 struct ieee80211_tx_info *next_info =
505 IEEE80211_SKB_CB(next_skb);
506 if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
507 tcb_desc->empkt_len[tcb_desc->empkt_num] =
508 next_skb->len + additionlen;
509 tcb_desc->empkt_num++;
/* Stop at end of queue or when the HW's early-mode limit is hit. */
514 if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
518 if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
521 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
525 /* Early-mode only: drain the per-TID software wait queues and push
 * pending skbs to the hardware TX rings while there is descriptor
 * headroom. Skipped entirely when early mode is off or an easy-
 * concurrent band switch is in progress. */
526 static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
528 struct rtl_priv *rtlpriv = rtl_priv(hw);
529 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
530 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
531 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
532 struct sk_buff *skb = NULL;
533 struct ieee80211_tx_info *info = NULL;
534 int tid; /* should be int */
536 if (!rtlpriv->rtlhal.b_earlymode_enable)
/* Don't transmit while a phymode/band switch is in flight on either
 * this adapter or its buddy. */
538 if (rtlpriv->dm.supp_phymode_switch &&
539 (rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
540 (rtlpriv->buddy_priv &&
541 rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
543 /* we just use em for BE/BK/VI/VO */
544 for (tid = 7; tid >= 0; tid--) {
545 u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
546 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
547 while (!mac->act_scanning &&
548 rtlpriv->psc.rfpwr_state == ERFON) {
549 struct rtl_tcb_desc tcb_desc;
550 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
/* Dequeue only while the ring keeps at least max_earlymode_num
 * descriptors free; otherwise drop the lock and stop. */
552 spin_lock_bh(&rtlpriv->locks.waitq_lock);
553 if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
554 (ring->entries - skb_queue_len(&ring->queue) >
555 rtlhal->max_earlymode_num)) {
556 skb = skb_dequeue(&mac->skb_waitq[tid]);
558 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
561 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
563 /* Some macaddr can't do early mode, like
564 * multicast/broadcast/no_qos data. */
565 info = IEEE80211_SKB_CB(skb);
566 if (info->flags & IEEE80211_TX_CTL_AMPDU)
567 _rtl_pci_update_earlymode_info(hw, skb,
570 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
/*
 * TX-complete handler for one hardware queue: reap finished descriptors,
 * unmap their DMA buffers, update PS/SMPS bookkeeping, report TX status
 * to mac80211, and wake the queue when enough descriptors are free.
 */
575 static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
577 struct rtl_priv *rtlpriv = rtl_priv(hw);
578 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
579 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
581 while (skb_queue_len(&ring->queue)) {
583 struct ieee80211_tx_info *info;
/* New trx flow uses buffer descriptors; legacy uses plain descs. */
589 if (rtlpriv->use_new_trx_flow)
590 entry = (u8 *)(&ring->buffer_desc[ring->idx]);
592 entry = (u8 *)(&ring->desc[ring->idx]);
/* Stop reaping at the first descriptor HW hasn't finished with. */
594 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
597 ring->idx = (ring->idx + 1) % ring->entries;
599 skb = __skb_dequeue(&ring->queue);
/* Unmap before touching skb data - HW owned this buffer until now. */
601 pci_unmap_single(rtlpci->pdev,
602 le32_to_cpu(rtlpriv->cfg->ops->
603 get_desc((u8 *) entry, true,
604 HW_DESC_TXBUFF_ADDR)),
605 skb->len, PCI_DMA_TODEVICE);
607 /* remove early mode header */
608 if(rtlpriv->rtlhal.b_earlymode_enable)
609 skb_pull(skb, EM_HDR_LEN);
611 RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE,
612 ("new ring->idx:%d, "
613 "free: skb_queue_len:%d, free: seq:%d\n",
615 skb_queue_len(&ring->queue),
616 *(u16 *) (skb->data + 22)));
618 if(prio == TXCMD_QUEUE) {
624 /* For SW LPS: just after the NULL frame is sent out, we can
625 * be sure the AP knows we are asleep, so we should not let */
627 fc = rtl_get_fc(skb);
628 if (ieee80211_is_nullfunc(fc)) {
629 if(ieee80211_has_pm(fc)) {
630 rtlpriv->mac80211.offchan_deley = true;
631 rtlpriv->psc.state_inap = 1;
633 rtlpriv->psc.state_inap = 0;
/* Track completed SMPS action frames. */
636 if (ieee80211_is_action(fc)) {
637 struct ieee80211_mgmt_compat *action_frame =
638 (struct ieee80211_mgmt_compat *)skb->data;
639 if (action_frame->u.action.u.ht_smps.action ==
640 WLAN_HT_ACTION_SMPS) {
646 /* update tid tx pkt num */
647 tid = rtl_get_tid(skb);
649 rtlpriv->link_info.tidtx_inperiod[tid]++;
/* Report ACKed status back to mac80211 from IRQ context. */
651 info = IEEE80211_SKB_CB(skb);
652 ieee80211_tx_info_clear_status(info);
654 info->flags |= IEEE80211_TX_STAT_ACK;
655 /*info->status.rates[0].count = 1; */
657 ieee80211_tx_status_irqsafe(hw, skb);
659 if ((ring->entries - skb_queue_len(&ring->queue))
662 RT_TRACE(COMP_ERR, DBG_LOUD,
663 ("more desc left, wake"
664 "skb_queue@%d,ring->idx = %d,"
665 "skb_queue_len = 0x%d\n",
667 skb_queue_len(&ring->queue)));
669 ieee80211_wake_queue(hw,
670 skb_get_queue_mapping
/* Kick LPS leave check when traffic is busy enough. */
677 if (((rtlpriv->link_info.num_rx_inperiod +
678 rtlpriv->link_info.num_tx_inperiod) > 8) ||
679 (rtlpriv->link_info.num_rx_inperiod > 2)) {
/*
 * Allocate and DMA-map one RX skb, then program the given RX descriptor
 * (legacy or new-trx-flow buffer descriptor) with its bus address and,
 * for the legacy path, the buffer size. The DMA address is stashed in
 * skb->cb so the completion path can pci_unmap_single() it later.
 */
684 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
685 u8 *entry, int rxring_idx, int desc_idx)
687 struct rtl_priv *rtlpriv = rtl_priv(hw);
688 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
693 skb = dev_alloc_skb(rtlpci->rxbuffersize);
696 rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
698 /* just set skb->cb to mapping addr
699 * for pci_unmap_single use */
700 *((dma_addr_t *) skb->cb) = pci_map_single(rtlpci->pdev,
701 skb_tail_pointer(skb), rtlpci->rxbuffersize,
703 bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
704 if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
706 if (rtlpriv->use_new_trx_flow) {
707 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
709 (u8 *) & bufferaddress);
/* Legacy flow also records the buffer size and (elided here)
 * hands ownership of the descriptor back to hardware. */
711 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
713 (u8 *) & bufferaddress);
714 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
716 (u8 *) & rtlpci->rxbuffersize);
717 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
725 /* In order to receive 8K AMSDU we have set the skb to
726 * 9100 bytes in init rx ring, but if this packet is
727 * not an AMSDU, this too-big packet will be sent to
728 * TCP/IP directly, which causes big-packet ping failures
729 * like "ping -s 65507". So here we realloc the skb
730 * based on the true size of the packet. (Doing this in
731 * mac80211 would be better, but mac80211 doesn't yet.) */
733 /* Some platforms will occasionally fail to alloc the skb.
734 * In that condition, we send the old (oversized) skb to
735 * mac80211 directly; this causes no other issues beyond
736 * the packet possibly being dropped by TCP/IP. */
737 static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
738 struct sk_buff *skb, struct ieee80211_rx_status rx_status)
/* Action frames may be consumed by the driver; drop them here. */
740 if (unlikely(!rtl_action_proc(hw, skb, false))) {
741 dev_kfree_skb_any(skb);
743 struct sk_buff *uskb = NULL;
/* Right-size the skb (+128 headroom) and copy payload + rx status. */
746 uskb = dev_alloc_skb(skb->len + 128);
748 memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
750 pdata = (u8 *)skb_put(uskb, skb->len);
751 memcpy(pdata, skb->data, skb->len);
752 dev_kfree_skb_any(skb);
754 ieee80211_rx_irqsafe(hw, uskb);
/* Alloc-failure fallback: hand the original skb up unchanged. */
756 ieee80211_rx_irqsafe(hw, skb);
761 /* HSISR interrupt handler: acknowledge the host system interrupt
 * status bits by writing them back (read-modify-write OR of the
 * enabled sys_irq_mask bits). Only valid on chips that define
 * maps[MAC_HSISR] (8188EE/8723BE — see caller). */
762 static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
764 struct rtl_priv *rtlpriv = rtl_priv(hw);
765 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
767 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
768 rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
769 rtlpci->sys_irq_mask);
/*
 * RX interrupt bottom half: walk the MPDU RX ring, unmap each finished
 * buffer, parse the RX descriptor into status/rx_status, update link
 * statistics and PS helpers, hand good frames to mac80211, then refill
 * and recycle the descriptor. Supports both the legacy descriptor flow
 * and the new trx (buffer-descriptor) flow.
 */
773 static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
775 struct rtl_priv *rtlpriv = rtl_priv(hw);
776 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
777 int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
779 struct ieee80211_rx_status rx_status = { 0 };
780 unsigned int count = rtlpci->rxringcount;
781 bool unicast = false;
783 unsigned int rx_remained_cnt;
787 struct rtl_stats status = {
795 struct ieee80211_hdr *hdr;
798 /*rx buffer descriptor */
799 struct rtl_rx_buffer_desc *buffer_desc = NULL;
800 /*if use new trx flow, it means wifi info */
801 struct rtl_rx_desc *pdesc = NULL;
803 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
804 rtlpci->rx_ring[rxring_idx].idx];
/* New flow: HW reports how many buffers are ready; legacy flow:
 * poll the OWN bit of the current descriptor. */
806 if (rtlpriv->use_new_trx_flow) {
808 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
810 if (rx_remained_cnt < 1)
813 } else { /* rx descriptor */
814 pdesc = &rtlpci->rx_ring[rxring_idx].desc[
815 rtlpci->rx_ring[rxring_idx].idx];
817 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
820 if (own) /* wait data to be filled by hardware */
824 /* Get here means: data is filled already*/
825 /* AAAAAAttention !!!
826 * We can NOT access 'skb' before 'pci_unmap_single' */
827 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
828 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
830 if (rtlpriv->use_new_trx_flow) {
831 buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
832 rtlpci->rx_ring[rxring_idx].idx];
833 /*means rx wifi info*/
834 pdesc = (struct rtl_rx_desc *)skb->data;
837 rtlpriv->cfg->ops->query_rx_desc(hw, &status,
838 &rx_status, (u8 *) pdesc, skb);
840 if (rtlpriv->use_new_trx_flow)
841 rtlpriv->cfg->ops->rx_check_dma_ok(hw,
846 len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
/* Strip driver-info/shift header only when the skb can hold the
 * reported packet length; otherwise log and (elided) bail out. */
849 if (skb->end - skb->tail > len) {
851 if (rtlpriv->use_new_trx_flow)
852 skb_reserve(skb, status.rx_drvinfo_size +
853 status.rx_bufshift + 24);
855 skb_reserve(skb, status.rx_drvinfo_size +
859 printk("skb->end - skb->tail = %d, len is %d\n",
860 skb->end - skb->tail, len);
864 rtlpriv->cfg->ops->rx_command_packet_handler(hw, &status, skb);
867 *NOTICE This can not be use for mac80211,
868 *this is done in mac80211 code,
869 *if you done here sec DHCP will fail
870 *skb_trim(skb, skb->len - 4);
873 hdr = rtl_get_hdr(skb);
874 fc = rtl_get_fc(skb);
/* Only frames without CRC/HW errors are counted and delivered. */
876 if (!status.b_crc && !status.b_hwerror) {
877 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
880 if (is_broadcast_ether_addr(hdr->addr1)) {
882 } else if (is_multicast_ether_addr(hdr->addr1)) {
886 rtlpriv->stats.rxbytesunicast += skb->len;
889 rtl_is_special_data(hw, skb, false);
891 if (ieee80211_is_data(fc)) {
892 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
895 rtlpriv->link_info.num_rx_inperiod++;
898 /* static bcn for roaming */
899 rtl_beacon_statistic(hw, skb);
900 rtl_p2p_info(hw, (void*)skb->data, skb->len);
/* SW LPS / peer-recognition hooks on every good frame. */
902 rtl_swlps_beacon(hw, (void*)skb->data, skb->len);
903 rtl_recognize_peer(hw, (void*)skb->data, skb->len);
/* AP mode on 2.4G: drop beacons/probe responses instead of
 * forwarding them to mac80211. */
904 if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
905 (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&&
906 (ieee80211_is_beacon(fc) ||
907 ieee80211_is_probe_resp(fc))) {
908 dev_kfree_skb_any(skb);
910 _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
913 dev_kfree_skb_any(skb);
/* New flow: advance the read pointer and tell HW (reg 0x3B4). */
915 if (rtlpriv->use_new_trx_flow) {
916 rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
917 rtlpci->rx_ring[hw_queue].next_rx_rp %=
918 RTL_PCI_MAX_RX_COUNT;
922 if (1/*rx_remained_cnt == 0*/) {
923 rtl_write_word(rtlpriv, 0x3B4,
924 rtlpci->rx_ring[hw_queue].next_rx_rp);
/* Busy-traffic check mirrors the one in the TX ISR. */
927 if (((rtlpriv->link_info.num_rx_inperiod +
928 rtlpriv->link_info.num_tx_inperiod) > 8) ||
929 (rtlpriv->link_info.num_rx_inperiod > 2)) {
/* Refill the just-consumed descriptor with a fresh skb. */
933 if (rtlpriv->use_new_trx_flow) {
934 _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
936 rtlpci->rx_ring[rxring_idx].idx);
938 _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
939 rtlpci->rx_ring[rxring_idx].idx);
/* Legacy flow: mark the last descriptor as end-of-ring (EOR). */
941 if (rtlpci->rx_ring[rxring_idx].idx ==
942 rtlpci->rxringcount - 1)
943 rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc,
948 rtlpci->rx_ring[rxring_idx].idx =
949 (rtlpci->rx_ring[rxring_idx].idx + 1) %
/*
 * Top-level (shared) IRQ handler. Under irq_th_lock it masks HIMR/HIMRE,
 * reads+acks the interrupt status (inta/intb), then dispatches: beacon
 * events, per-queue TX completions, RX, error conditions (RDU/FIFO
 * overflow), firmware C2H events, and chip-specific HSISR handling.
 * Interrupt masks are restored before the lock is released.
 */
954 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
956 struct ieee80211_hw *hw = dev_id;
957 struct rtl_priv *rtlpriv = rtl_priv(hw);
958 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
959 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
/* Interrupts not enabled yet (e.g. during teardown): not ours. */
966 if (rtlpci->irq_enabled == 0)
969 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,flags);
/* Mask both interrupt mask registers while we service the ISR. */
972 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
975 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);
978 /*read ISR: 4/8bytes */
979 rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
982 /*Shared IRQ or HW disappeared */
983 if (!inta || inta == 0xffff)
985 /*<1> beacon related */
986 if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
987 RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n"));
990 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
991 RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n"));
994 if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
995 RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n"));
/* Early-beacon interrupt: build the next beacon in a tasklet. */
998 if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
999 RT_TRACE(COMP_INTR, DBG_TRACE,
1000 ("prepare beacon for interrupt!\n"));
1001 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
1006 if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
1007 RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n"));
/* Per-queue TX-done dispatch. */
1009 if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
1010 RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n"));
1011 _rtl_pci_tx_isr(hw, MGNT_QUEUE);
1014 if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
1015 RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n"));
1016 _rtl_pci_tx_isr(hw, HIGH_QUEUE);
1019 if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
1020 rtlpriv->link_info.num_tx_inperiod++;
1022 RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n"));
1023 _rtl_pci_tx_isr(hw, BK_QUEUE);
1026 if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
1027 rtlpriv->link_info.num_tx_inperiod++;
1029 RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n"));
1030 _rtl_pci_tx_isr(hw, BE_QUEUE);
1033 if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
1034 rtlpriv->link_info.num_tx_inperiod++;
1036 RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n"));
1037 _rtl_pci_tx_isr(hw, VI_QUEUE);
1040 if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
1041 rtlpriv->link_info.num_tx_inperiod++;
1043 RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n"));
1044 _rtl_pci_tx_isr(hw, VO_QUEUE);
/* 8192SE has an extra command queue. */
1047 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
1048 if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
1049 rtlpriv->link_info.num_tx_inperiod++;
1051 RT_TRACE(COMP_INTR, DBG_TRACE,
1052 ("CMD TX OK interrupt!\n"));
1053 _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
/* RX OK and RX error conditions. */
1058 if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
1059 RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
1061 _rtl_pci_rx_interrupt(hw);
1065 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
1066 RT_TRACE(COMP_ERR, DBG_WARNING,
1067 ("rx descriptor unavailable!\n"));
1068 rtl_write_byte(rtlpriv, 0xb4, BIT(1) );
1069 _rtl_pci_rx_interrupt(hw);
1072 if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
1073 RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
1074 _rtl_pci_rx_interrupt(hw);
/* 8723AE: firmware C2H events are handled via workqueue. */
1078 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
1079 if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
1080 RT_TRACE(COMP_INTR, DBG_TRACE,
1081 ("firmware interrupt!\n"));
1082 queue_delayed_work(rtlpriv->works.rtl_wq,
1083 &rtlpriv->works.fwevt_wq, 0);
1087 /*<5> hsisr related*/
1088 /* Only 8188EE & 8723BE Supported.
1089 * If Other ICs Come in, System will corrupt,
1090 * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR]
1091 * are not initialized*/
1092 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
1093 rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
1094 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
1095 RT_TRACE(COMP_INTR, DBG_TRACE,
1096 ("hsisr interrupt!\n"));
1097 _rtl_pci_hs_interrupt(hw);
/* Early mode: drain the software wait queues in tasklet context. */
1102 if(rtlpriv->rtlhal.b_earlymode_enable)
1103 tasklet_schedule(&rtlpriv->works.irq_tasklet);
/* Restore interrupt masks before leaving the critical section. */
1105 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR],
1106 rtlpci->irq_mask[0]);
1107 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
1108 rtlpci->irq_mask[1]);
1109 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1114 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
/* IRQ tasklet body: process the early-mode software TX wait queues
 * outside of hard-IRQ context. */
1118 static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
1120 _rtl_pci_tx_chk_waitq(hw);
/*
 * Beacon tasklet: free the previously queued beacon, fetch a fresh one
 * from mac80211, fill TX descriptor 0 of the beacon ring with it, and
 * hand the descriptor back to hardware (OWN bit).
 */
1123 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1125 struct rtl_priv *rtlpriv = rtl_priv(hw);
1126 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1127 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1128 struct rtl8192_tx_ring *ring = NULL;
1129 struct ieee80211_hdr *hdr = NULL;
1130 struct ieee80211_tx_info *info = NULL;
1131 struct sk_buff *pskb = NULL;
1132 struct rtl_tx_desc *pdesc = NULL;
1133 struct rtl_tcb_desc tcb_desc;
1134 /*This is for new trx flow*/
1135 struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
1138 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
/* Drop the previous beacon skb (free elided in this listing). */
1139 ring = &rtlpci->tx_ring[BEACON_QUEUE];
1140 pskb = __skb_dequeue(&ring->queue);
1144 /*NB: the beacon data buffer must be 32-bit aligned. */
1145 pskb = ieee80211_beacon_get(hw, mac->vif);
1148 hdr = rtl_get_hdr(pskb);
1149 info = IEEE80211_SKB_CB(pskb);
/* Beacon queue only ever uses descriptor 0 (see init_trx_var). */
1150 pdesc = &ring->desc[0];
1151 if (rtlpriv->use_new_trx_flow)
1152 pbuffer_desc = &ring->buffer_desc[0];
1154 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
1155 (u8 *)pbuffer_desc, info, NULL, pskb,
1156 BEACON_QUEUE, &tcb_desc);
1158 __skb_queue_tail(&ring->queue, pskb);
/* Give the descriptor back to hardware. */
1160 rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, HW_DESC_OWN,
/*
 * Set default TX/RX ring sizes: per-queue TX descriptor counts (chip
 * dependent), a 2-entry beacon ring, a larger BE ring on the legacy
 * flow, plus the RX buffer size and RX ring count.
 */
1166 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
1168 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1169 struct rtl_priv *rtlpriv = rtl_priv(hw);
1170 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1174 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
1175 desc_num = TX_DESC_NUM_92E;
1177 desc_num = RT_TXDESC_NUM;
1179 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1180 rtlpci->txringcount[i] = desc_num;
1183 *we just alloc 2 desc for beacon queue,
1184 *because we just need first desc in hw beacon.
1186 rtlpci->txringcount[BEACON_QUEUE] = 2;
1189 *BE queue need more descriptor for performance
1190 *consideration or, No more tx desc will happen,
1191 *and may cause mac80211 mem leakage.
1193 if (rtl_priv(hw)->use_new_trx_flow == false)
1194 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
/* 9100 bytes so an 8K A-MSDU fits in one RX buffer. */
1196 rtlpci->rxbuffersize = 9100; /*2048/1024; */
1197 rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
/*
 * One-time software initialization of driver state for a PCI adapter:
 * HAL flags, TX/RX ring sizes, MAC defaults (beacon interval, AMPDU
 * parameters), ACM method, and the IRQ / beacon-preparation tasklets.
 */
1200 static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1201 struct pci_dev *pdev)
1203 struct rtl_priv *rtlpriv = rtl_priv(hw);
1204 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1205 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1206 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1208 rtlpriv->rtlhal.up_first_time = true;
1209 rtlpriv->rtlhal.being_init_adapter = false;
1212 rtlpci->pdev = pdev;
1214 /*Tx/Rx related var */
1215 _rtl_pci_init_trx_var(hw);
1217 /*IBSS*/ mac->beacon_interval = 100;
1220 mac->min_space_cfg = 0;
1221 mac->max_mss_density = 0;
1222 /*set sane AMPDU defaults */
1223 mac->current_ampdu_density = 7;
1224 mac->current_ampdu_factor = 3;
1227 rtlpci->acm_method = eAcmWay2_SW;
/* Tasklets run the deferred IRQ work (early-mode TX drain, beacon). */
1230 tasklet_init(&rtlpriv->works.irq_tasklet,
1231 (void (*)(unsigned long))_rtl_pci_irq_tasklet,
1233 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1234 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
/*
 * Allocate one TX ring (queue @prio, @entries descriptors) from
 * coherent DMA memory. The new trx flow additionally allocates buffer
 * descriptors and read/write pointers; the legacy flow chains each
 * descriptor's next-descriptor address to form a circular ring.
 * Returns 0 on success, negative on allocation failure (elided paths).
 */
1238 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1239 unsigned int prio, unsigned int entries)
1241 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1242 struct rtl_priv *rtlpriv = rtl_priv(hw);
1243 struct rtl_tx_buffer_desc *buffer_desc;
1244 struct rtl_tx_desc *desc;
1245 dma_addr_t buffer_desc_dma, desc_dma;
1246 u32 nextdescaddress;
1249 /* alloc tx buffer desc for new trx flow*/
1250 if (rtlpriv->use_new_trx_flow) {
1252 pci_zalloc_consistent(rtlpci->pdev,
1253 sizeof(*buffer_desc) * entries,
/* Hardware requires the ring to be 256-byte aligned. */
1256 if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
1257 RT_TRACE(COMP_ERR, DBG_EMERG,
1258 ("Cannot allocate TX ring (prio = %d)\n",
1263 rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
1264 rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
1266 rtlpci->tx_ring[prio].cur_tx_rp = 0;
1267 rtlpci->tx_ring[prio].cur_tx_wp = 0;
1268 rtlpci->tx_ring[prio].avl_desc = entries;
1272 /* alloc dma for this ring */
1273 desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries,
/* Same 256-byte alignment requirement for the descriptor ring. */
1276 if (!desc || (unsigned long)desc & 0xFF) {
1277 RT_TRACE(COMP_ERR, DBG_EMERG,
1278 ("Cannot allocate TX ring (prio = %d)\n", prio));
1282 rtlpci->tx_ring[prio].desc = desc;
1283 rtlpci->tx_ring[prio].dma = desc_dma;
1285 rtlpci->tx_ring[prio].idx = 0;
1286 rtlpci->tx_ring[prio].entries = entries;
1287 skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1288 RT_TRACE(COMP_INIT, DBG_LOUD,
1289 ("queue:%d, ring_addr:%p\n", prio, desc));
1291 /* init every desc in this ring */
1292 if (rtlpriv->use_new_trx_flow == false) {
/* Legacy flow: link descriptor i to (i+1) mod entries to close
 * the circular ring. */
1293 for (i = 0; i < entries; i++) {
1294 nextdescaddress = cpu_to_le32((u32) desc_dma +
1295 ((i + 1) % entries) *
1298 rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]),
1300 HW_DESC_TX_NEXTDESC_ADDR,
1301 (u8 *) & nextdescaddress);
/*
 * _rtl_pci_init_rx_ring - allocate and initialize one RX ring.
 * @rxring_idx: ring index (0 = RX_MPDU_QUEUE, 1 = RX_CMD_QUEUE per caller).
 *
 * Allocates rxringcount descriptors (buffer descriptors for the new trx
 * flow, classic descriptors otherwise), rejecting allocations that are not
 * 256-byte aligned, then calls _rtl_pci_init_one_rxdesc() for every slot to
 * attach an RX skb.  In the legacy path the last descriptor gets
 * HW_DESC_RXERO set so the hardware knows the end of the ring.
 * NOTE(review): error returns, the 'i'/'tmp_one' declarations, and closing
 * braces are elided in this excerpt.
 */
1307 static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
1309 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1310 struct rtl_priv *rtlpriv = rtl_priv(hw);
1314 if (rtlpriv->use_new_trx_flow) {
1315 struct rtl_rx_buffer_desc *entry = NULL;
1316 /* alloc dma for this ring */
1317 rtlpci->rx_ring[rxring_idx].buffer_desc =
1318 pci_zalloc_consistent(rtlpci->pdev,
1319 sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * rtlpci->rxringcount,
1320 &rtlpci->rx_ring[rxring_idx].dma);
/* Fail on allocation error or missing 256-byte alignment. */
1321 if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
1322 (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
1323 RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n"));
1327 /* init every desc in this ring */
1328 rtlpci->rx_ring[rxring_idx].idx = 0;
1329 for (i = 0; i < rtlpci->rxringcount; i++) {
1330 entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
1331 if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
/* Legacy (non new-trx-flow) path below. */
1336 struct rtl_rx_desc *entry = NULL;
1338 /* alloc dma for this ring */
1339 rtlpci->rx_ring[rxring_idx].desc =
1340 pci_zalloc_consistent(rtlpci->pdev,
1341 sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount,
1342 &rtlpci->rx_ring[rxring_idx].dma);
1343 if (!rtlpci->rx_ring[rxring_idx].desc ||
1344 (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
1345 RT_TRACE(COMP_ERR, DBG_EMERG,
1346 ("Cannot allocate RX ring\n"));
1350 /* init every desc in this ring */
1351 rtlpci->rx_ring[rxring_idx].idx = 0;
1352 for (i = 0; i < rtlpci->rxringcount; i++) {
1353 entry = &rtlpci->rx_ring[rxring_idx].desc[i];
1354 if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
/* Mark ring end on the last legacy descriptor. */
1358 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
1359 HW_DESC_RXERO, (u8 *) & tmp_one);
/*
 * _rtl_pci_free_tx_ring - unmap pending skbs and release one TX ring's DMA.
 *
 * Drains ring->queue: for each pending skb it reads the buffer address back
 * from the descriptor at ring->idx, unmaps it, and advances ring->idx.
 * Afterwards the coherent descriptor memory is released; the new-trx-flow
 * buffer-descriptor array is freed separately and NULLed.
 * NOTE(review): the 'prio' parameter line, skb free, and some braces are
 * elided in this excerpt.
 */
1364 static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1367 struct rtl_priv *rtlpriv = rtl_priv(hw);
1368 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1369 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1371 /* free every desc in this ring */
1372 while (skb_queue_len(&ring->queue)) {
1374 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1375 if (rtlpriv->use_new_trx_flow)
1376 entry = (u8 *)(&ring->buffer_desc[ring->idx]);
1378 entry = (u8 *)(&ring->desc[ring->idx]);
/* DMA address is recovered from the descriptor itself. */
1380 pci_unmap_single(rtlpci->pdev,
1381 le32_to_cpu(rtlpriv->cfg->ops->get_desc(
1382 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
1383 skb->len, PCI_DMA_TODEVICE);
1385 ring->idx = (ring->idx + 1) % ring->entries;
1388 /* free dma of this ring */
1389 pci_free_consistent(rtlpci->pdev,
1390 sizeof(*ring->desc) * ring->entries,
1391 ring->desc, ring->dma);
1393 if (rtlpriv->use_new_trx_flow) {
1394 pci_free_consistent(rtlpci->pdev,
1395 sizeof(*ring->buffer_desc) * ring->entries,
1396 ring->buffer_desc, ring->buffer_desc_dma);
1397 ring->buffer_desc = NULL;
/*
 * _rtl_pci_free_rx_ring - unmap RX skbs and release one RX ring's DMA.
 *
 * Unmaps every rx_buf skb (its DMA handle is stashed in skb->cb), then
 * frees the coherent descriptor memory: buffer_desc array for the new trx
 * flow, classic desc array otherwise; the freed pointer is NULLed.
 * NOTE(review): the skb NULL-check/free lines and some braces are elided
 * in this excerpt.
 */
1401 static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
1403 struct rtl_priv *rtlpriv = rtl_priv(hw);
1404 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1407 /* free every desc in this ring */
1408 for (i = 0; i < rtlpci->rxringcount; i++) {
1409 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
/* The mapping's dma_addr_t was stored in skb->cb at map time. */
1413 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
1414 rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
1418 /* free dma of this ring */
1419 if (rtlpriv->use_new_trx_flow) {
1420 pci_free_consistent(rtlpci->pdev,
1421 sizeof(*rtlpci->rx_ring[rxring_idx].
1422 buffer_desc) * rtlpci->rxringcount,
1423 rtlpci->rx_ring[rxring_idx].buffer_desc,
1424 rtlpci->rx_ring[rxring_idx].dma);
1425 rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
1427 pci_free_consistent(rtlpci->pdev,
1428 sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
1429 rtlpci->rxringcount,
1430 rtlpci->rx_ring[rxring_idx].desc,
1431 rtlpci->rx_ring[rxring_idx].dma);
1432 rtlpci->rx_ring[rxring_idx].desc = NULL;
/*
 * _rtl_pci_init_trx_ring - allocate all RX rings then all TX rings.
 *
 * On TX-ring failure, unwinds by freeing every RX ring and any TX ring
 * that was already allocated (desc or buffer_desc non-NULL).
 * NOTE(review): the err_free_rings label, return statements, and 'i'/'ret'
 * declarations are elided in this excerpt.
 */
1436 static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1438 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1442 /* rxring_idx 0:RX_MPDU_QUEUE
1443 * rxring_idx 1:RX_CMD_QUEUE */
1444 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
1445 ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
1450 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1451 ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
1453 goto err_free_rings;
/* Error unwind: release everything allocated so far. */
1459 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
1460 _rtl_pci_free_rx_ring(hw, rxring_idx);
1462 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1463 if (rtlpci->tx_ring[i].desc ||
1464 rtlpci->tx_ring[i].buffer_desc)
1465 _rtl_pci_free_tx_ring(hw, i);
/*
 * _rtl_pci_deinit_trx_ring - free all RX and TX rings (teardown path,
 * called from rtl_pci_deinit).
 */
1470 static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1475 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
1476 _rtl_pci_free_rx_ring(hw, rxring_idx);
1479 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1480 _rtl_pci_free_tx_ring(hw, i);
/*
 * rtl_pci_reset_trx_ring - reset ring indices and drop pending TX packets.
 *
 * Called before hardware (re)initialization (see rtl_pci_start).  For the
 * legacy flow each RX descriptor is re-armed via set_desc and the ring index
 * forced back to 0 (the new trx flow needs no RX work here).  Then, under
 * irq_th_lock, every pending TX skb is unmapped and released so the TX
 * indices also restart from the first descriptor.
 * NOTE(review): several interior lines (set_desc arguments, skb free,
 * braces) are elided in this excerpt.
 */
1485 int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1487 struct rtl_priv *rtlpriv = rtl_priv(hw);
1488 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1490 unsigned long flags;
1492 /* rxring_idx 0:RX_MPDU_QUEUE */
1493 /* rxring_idx 1:RX_CMD_QUEUE */
1494 for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
1495 /* force the rx_ring[RX_MPDU_QUEUE/
1496 * RX_CMD_QUEUE].idx to the first one */
1497 /*new trx flow, do nothing*/
1498 if ((rtlpriv->use_new_trx_flow == false) &&
1499 rtlpci->rx_ring[rxring_idx].desc) {
1500 struct rtl_rx_desc *entry = NULL;
1502 for (i = 0; i < rtlpci->rxringcount; i++) {
1503 entry = &rtlpci->rx_ring[rxring_idx].desc[i];
1504 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry,
1510 rtlpci->rx_ring[rxring_idx].idx = 0; }
1512 /* after reset, release previous pending packet,
1513 * and force the tx idx to the first one */
1514 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1515 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1516 if (rtlpci->tx_ring[i].desc ||
1517 rtlpci->tx_ring[i].buffer_desc) {
1518 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1520 while (skb_queue_len(&ring->queue)) {
1522 struct sk_buff *skb =
1523 __skb_dequeue(&ring->queue);
1524 if (rtlpriv->use_new_trx_flow)
1525 entry = (u8 *)(&ring->buffer_desc
1528 entry = (u8 *)(&ring->desc[ring->idx]);
/* Unmap using the buffer address stored in the descriptor. */
1530 pci_unmap_single(rtlpci->pdev,
1531 le32_to_cpu(rtlpriv->cfg->ops->get_desc(
1533 HW_DESC_TXBUFF_ADDR)),
1534 skb->len, PCI_DMA_TODEVICE);
1536 ring->idx = (ring->idx + 1) % ring->entries;
1542 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
/*
 * rtl_pci_tx_chk_waitq_insert - queue an skb on the early-mode wait queue.
 *
 * Returns true only when the frame was parked on mac80211.skb_waitq[tid]
 * for later aggregation (early mode enabled, a normal data frame on an
 * AC queue, with an operational aggregation session and busy TX traffic
 * on that tid); otherwise the frame is sent by the normal TX path.
 * NOTE(review): the early 'return false' lines between the checks are
 * elided in this excerpt.
 */
1547 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1548 struct ieee80211_sta *sta,
1549 struct sk_buff *skb)
1551 struct rtl_priv *rtlpriv = rtl_priv(hw);
1552 struct rtl_sta_info *sta_entry = NULL;
1553 u8 tid = rtl_get_tid(skb);
1554 u16 fc = rtl_get_fc(skb);
1558 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
/* All of the following disqualify the frame from early-mode queueing. */
1560 if (!rtlpriv->rtlhal.b_earlymode_enable)
1562 if (ieee80211_is_nullfunc(fc))
1564 if (ieee80211_is_qos_nullfunc(fc))
1566 if (ieee80211_is_pspoll(fc)) {
1570 if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1572 if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1576 /* maybe every tid should be checked */
1577 if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1580 spin_lock_bh(&rtlpriv->locks.waitq_lock);
1581 skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1582 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
/*
 * rtl_pci_tx - hand one skb to the hardware TX ring (intf_ops->adapter_tx).
 *
 * Flow: management/PS/action pre-processing and byte counters, then under
 * irq_th_lock: pick the descriptor slot (write pointer for the new trx
 * flow, idx + queue length for legacy), bail out with a warning if the
 * descriptor is still owned by hardware, record QoS sequence numbers, fill
 * the descriptor, queue the skb, hand ownership to the hardware, and stop
 * the mac80211 queue when fewer than 2 descriptors remain.  Finally kicks
 * TX polling for the queue.
 * NOTE(review): several interior lines (local declarations of idx/own/tid/
 * seq_number/temp_one, return statements, braces) are elided in this
 * excerpt.
 */
1587 static int rtl_pci_tx(struct ieee80211_hw *hw,
1588 struct ieee80211_sta *sta,
1589 struct sk_buff *skb,
1590 struct rtl_tcb_desc *ptcb_desc)
1592 struct rtl_priv *rtlpriv = rtl_priv(hw);
1593 struct rtl_sta_info *sta_entry = NULL;
1594 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1595 struct rtl8192_tx_ring *ring;
1596 struct rtl_tx_desc *pdesc;
1597 struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
1601 u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1602 unsigned long flags;
1603 struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
1604 u16 fc = rtl_get_fc(skb);
1605 u8 *pda_addr = hdr->addr1;
1606 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1612 if (ieee80211_is_mgmt(fc))
1613 rtl_tx_mgmt_proc(hw, skb);
/* Software power save: set the PM bit on outgoing data frames. */
1615 if (rtlpriv->psc.sw_ps_enabled) {
1616 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
1617 !ieee80211_has_pm(fc))
1618 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
1621 rtl_action_proc(hw, skb, true);
/* Per-destination-type byte accounting. */
1623 if (is_multicast_ether_addr(pda_addr))
1624 rtlpriv->stats.txbytesmulticast += skb->len;
1625 else if (is_broadcast_ether_addr(pda_addr))
1626 rtlpriv->stats.txbytesbroadcast += skb->len;
1628 rtlpriv->stats.txbytesunicast += skb->len;
1630 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1631 ring = &rtlpci->tx_ring[hw_queue];
1632 if (hw_queue != BEACON_QUEUE) {
1633 if (rtlpriv->use_new_trx_flow)
1634 idx = ring->cur_tx_wp;
1636 idx = (ring->idx + skb_queue_len(&ring->queue)) %
1642 pdesc = &ring->desc[idx];
1644 if (rtlpriv->use_new_trx_flow) {
1645 ptx_bd_desc = &ring->buffer_desc[idx];
1647 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
/* Descriptor still owned by hardware: the ring is full, give up. */
1650 if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1651 RT_TRACE(COMP_ERR, DBG_WARNING,
1652 ("No more TX desc@%d, ring->idx = %d,"
1653 "idx = %d, skb_queue_len = 0x%d\n",
1654 hw_queue, ring->idx, idx,
1655 skb_queue_len(&ring->queue)));
1657 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
/* Track the last sequence number per tid for QoS data frames. */
1663 if (ieee80211_is_data_qos(fc)) {
1664 tid = rtl_get_tid(skb);
1666 sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1667 seq_number = (le16_to_cpu(hdr->seq_ctrl) &
1668 IEEE80211_SCTL_SEQ) >> 4;
1671 if (!ieee80211_has_morefrags(hdr->frame_control))
1672 sta_entry->tids[tid].seq_number = seq_number;
1676 if (ieee80211_is_data(fc))
1677 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1679 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
1680 (u8 *)ptx_bd_desc, info, sta, skb,
1681 hw_queue, ptcb_desc);
1683 __skb_queue_tail(&ring->queue, skb);
/* Give the descriptor to the hardware (OWN bit / queue kick). */
1684 if (rtlpriv->use_new_trx_flow) {
1685 rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
1686 HW_DESC_OWN, (u8 *) & hw_queue);
1688 rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true,
1689 HW_DESC_OWN, (u8 *) & temp_one);
/* Throttle mac80211 before the ring is completely exhausted. */
1692 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1693 hw_queue != BEACON_QUEUE) {
1695 RT_TRACE(COMP_ERR, DBG_LOUD,
1696 ("less desc left, stop skb_queue@%d, "
1698 "idx = %d, skb_queue_len = 0x%d\n",
1699 hw_queue, ring->idx, idx,
1700 skb_queue_len(&ring->queue)));
1702 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1705 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1707 rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
/*
 * rtl_pci_flush - wait for the selected TX queues to drain.
 * @queues: bitmap of queue indices to flush.
 * @drop:   unused here in the visible code.
 *
 * Iterates the TX rings from highest index down, skipping queues not in
 * the bitmap, already-empty queues, BEACON_QUEUE and TXCMD_QUEUE; gives up
 * early when RF is off, the HAL has stopped, or after ~200 iterations
 * (per the comment, about 1 s total).
 * NOTE(review): local declarations and the wait/delay lines are elided in
 * this excerpt.
 */
1711 static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1713 struct rtl_priv *rtlpriv = rtl_priv(hw);
1714 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1715 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1716 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1719 struct rtl8192_tx_ring *ring;
1724 for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1726 if (((queues >> queue_id) & 0x1) == 0) {
1730 ring = &pcipriv->dev.tx_ring[queue_id];
1731 queue_len = skb_queue_len(&ring->queue);
1732 if (queue_len == 0 || queue_id == BEACON_QUEUE ||
1733 queue_id == TXCMD_QUEUE) {
1741 /* we just wait 1s for all queues */
1742 if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1743 is_hal_stop(rtlhal) || i >= 200)
/*
 * rtl_pci_deinit - teardown counterpart of rtl_pci_init.
 *
 * Frees all TX/RX rings, waits for any in-flight interrupt handler,
 * kills the IRQ tasklet, and flushes/destroys the driver workqueue.
 */
1748 void rtl_pci_deinit(struct ieee80211_hw *hw)
1750 struct rtl_priv *rtlpriv = rtl_priv(hw);
1751 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1753 _rtl_pci_deinit_trx_ring(hw);
1755 synchronize_irq(rtlpci->pdev->irq);
1756 tasklet_kill(&rtlpriv->works.irq_tasklet);
1758 flush_workqueue(rtlpriv->works.rtl_wq);
1759 destroy_workqueue(rtlpriv->works.rtl_wq);
/*
 * rtl_pci_init - initialize PCI-side software state and the TX/RX rings.
 *
 * NOTE(review): the 'err' declaration, error-path return, and closing
 * brace are elided in this excerpt.
 */
1763 int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1765 struct rtl_priv *rtlpriv = rtl_priv(hw);
1768 _rtl_pci_init_struct(hw, pdev);
1770 err = _rtl_pci_init_trx_ring(hw);
1772 RT_TRACE(COMP_ERR, DBG_EMERG,
1773 ("tx ring initialization failed"));
/*
 * rtl_pci_start - bring the adapter up (intf_ops->adapter_start).
 *
 * Resets the TX/RX rings, runs the chip-specific hw_init, enables
 * interrupts, applies the RX filter config, and only then marks the HAL
 * as started (the comment below stresses this ordering).
 * NOTE(review): the 'err' declaration and return statements are elided
 * in this excerpt.
 */
1780 int rtl_pci_start(struct ieee80211_hw *hw)
1782 struct rtl_priv *rtlpriv = rtl_priv(hw);
1783 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1784 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1787 RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start \n"));
1788 rtl_pci_reset_trx_ring(hw);
1790 rtlpriv->rtlhal.driver_is_goingto_unload = false;
1791 err = rtlpriv->cfg->ops->hw_init(hw);
1793 RT_TRACE(COMP_INIT, DBG_DMESG,
1794 ("Failed to config hardware err %x!\n",err));
1798 rtlpriv->cfg->ops->enable_interrupt(hw);
1799 RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
1801 rtl_init_rx_config(hw);
1803 /*should after adapter start and interrupt enable. */
1804 set_hal_start(rtlhal);
1806 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1808 rtlpriv->rtlhal.up_first_time = false;
1810 RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n"));
/*
 * rtl_pci_stop - bring the adapter down (intf_ops->adapter_stop).
 *
 * Marks the HAL stopped and disables interrupts first, then spins (with a
 * ~100-iteration timeout) waiting for any in-progress RF state change,
 * claims rfchange_inprogress itself while disabling the hardware and LED,
 * and finally re-enables ASPM.
 * NOTE(review): the delay inside the wait loop and some braces are elided
 * in this excerpt.
 */
1814 void rtl_pci_stop(struct ieee80211_hw *hw)
1816 struct rtl_priv *rtlpriv = rtl_priv(hw);
1817 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1818 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1819 u8 RFInProgressTimeOut = 0;
1822 *should before disable interrupt&adapter
1823 *and will do it immediately.
1825 set_hal_stop(rtlhal);
1827 rtlpriv->cfg->ops->disable_interrupt(hw);
/* Wait (bounded) for a concurrent RF power-state change to finish. */
1829 spin_lock(&rtlpriv->locks.rf_ps_lock);
1830 while (ppsc->rfchange_inprogress) {
1831 spin_unlock(&rtlpriv->locks.rf_ps_lock);
1832 if (RFInProgressTimeOut > 100) {
1833 spin_lock(&rtlpriv->locks.rf_ps_lock);
1837 RFInProgressTimeOut++;
1838 spin_lock(&rtlpriv->locks.rf_ps_lock);
1840 ppsc->rfchange_inprogress = true;
1841 spin_unlock(&rtlpriv->locks.rf_ps_lock);
1843 rtlpriv->rtlhal.driver_is_goingto_unload = true;
1844 rtlpriv->cfg->ops->hw_disable(hw);
1845 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1847 spin_lock(&rtlpriv->locks.rf_ps_lock);
1848 ppsc->rfchange_inprogress = false;
1849 spin_unlock(&rtlpriv->locks.rf_ps_lock);
1851 rtl_pci_enable_aspm(hw);
/*
 * _rtl_pci_find_adapter - identify the chip variant and the PCI bridge.
 *
 * Reads the PCI vendor/device/revision IDs and maps them to a
 * HARDWARE_TYPE_* value; selects the new trx flow for 8192EE; records
 * bus/dev/func numbers for the device and (if present) its upstream
 * bridge, locates the bridge's PCIe capability for later ASPM/link-control
 * handling, and adds this priv to the global list.
 * NOTE(review): variable declarations (venderid, deviceid, revisionid,
 * irqline, tmp), several 'break'/'return' lines, and closing braces are
 * elided in this excerpt.
 */
1854 static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1855 struct ieee80211_hw *hw)
1857 struct rtl_priv *rtlpriv = rtl_priv(hw);
1858 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1859 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1860 struct pci_dev *bridge_pdev = pdev->bus->self;
/* Offset 0x08 = PCI revision ID, 0x3C = interrupt line register. */
1867 venderid = pdev->vendor;
1868 deviceid = pdev->device;
1869 pci_read_config_byte(pdev, 0x8, &revisionid);
1870 pci_read_config_word(pdev, 0x3C, &irqline);
/* 8192/8192SE family shares device IDs; disambiguate via revision. */
1872 if (deviceid == RTL_PCI_8192_DID ||
1873 deviceid == RTL_PCI_0044_DID ||
1874 deviceid == RTL_PCI_0047_DID ||
1875 deviceid == RTL_PCI_8192SE_DID ||
1876 deviceid == RTL_PCI_8174_DID ||
1877 deviceid == RTL_PCI_8173_DID ||
1878 deviceid == RTL_PCI_8172_DID ||
1879 deviceid == RTL_PCI_8171_DID) {
1880 switch (revisionid) {
1881 case RTL_PCI_REVISION_ID_8192PCIE:
1882 RT_TRACE(COMP_INIT, DBG_DMESG,
1883 ("8192E is found but not supported now-"
1884 "vid/did=%x/%x\n", venderid, deviceid));
1885 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1888 case RTL_PCI_REVISION_ID_8192SE:
1889 RT_TRACE(COMP_INIT, DBG_DMESG,
1890 ("8192SE is found - "
1891 "vid/did=%x/%x\n", venderid, deviceid));
1892 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1895 RT_TRACE(COMP_ERR, DBG_WARNING,
1896 ("Err: Unknown device - "
1897 "vid/did=%x/%x\n", venderid, deviceid));
1898 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1902 }else if(deviceid == RTL_PCI_8723AE_DID) {
1903 rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
1904 RT_TRACE(COMP_INIT, DBG_DMESG,
1905 ("8723AE PCI-E is found - "
1906 "vid/did=%x/%x\n", venderid, deviceid));
1907 } else if (deviceid == RTL_PCI_8192CET_DID ||
1908 deviceid == RTL_PCI_8192CE_DID ||
1909 deviceid == RTL_PCI_8191CE_DID ||
1910 deviceid == RTL_PCI_8188CE_DID) {
1911 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1912 RT_TRACE(COMP_INIT, DBG_DMESG,
1913 ("8192C PCI-E is found - "
1914 "vid/did=%x/%x\n", venderid, deviceid));
1915 } else if (deviceid == RTL_PCI_8192DE_DID ||
1916 deviceid == RTL_PCI_8192DE_DID2) {
1917 rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1918 RT_TRACE(COMP_INIT, DBG_DMESG,
1919 ("8192D PCI-E is found - "
1920 "vid/did=%x/%x\n", venderid, deviceid));
1921 }else if(deviceid == RTL_PCI_8188EE_DID){
1922 rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
1923 RT_TRACE(COMP_INIT,DBG_LOUD,
1924 ("Find adapter, Hardware type is 8188EE\n"));
1925 }else if (deviceid == RTL_PCI_8723BE_DID){
1926 rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
1927 RT_TRACE(COMP_INIT,DBG_LOUD,
1928 ("Find adapter, Hardware type is 8723BE\n"));
1929 }else if (deviceid == RTL_PCI_8192EE_DID){
1930 rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
1931 RT_TRACE(COMP_INIT,DBG_LOUD,
1932 ("Find adapter, Hardware type is 8192EE\n"));
1933 }else if (deviceid == RTL_PCI_8821AE_DID) {
1934 rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
1935 RT_TRACE(COMP_INIT,DBG_LOUD,
1936 ("Find adapter, Hardware type is 8821AE\n"));
1937 }else if (deviceid == RTL_PCI_8812AE_DID) {
1938 rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
1939 RT_TRACE(COMP_INIT,DBG_LOUD,
1940 ("Find adapter, Hardware type is 8812AE\n"));
/* Fallback for unrecognized device IDs. */
1942 RT_TRACE(COMP_ERR, DBG_WARNING,
1943 ("Err: Unknown device -"
1944 " vid/did=%x/%x\n", venderid, deviceid));
1946 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
/* 8192DE: the PCI revision distinguishes the two MAC interfaces. */
1949 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1950 if (revisionid == 0 || revisionid == 1) {
1951 if (revisionid == 0) {
1952 RT_TRACE(COMP_INIT, DBG_LOUD,
1953 ("Find 92DE MAC0.\n"));
1954 rtlhal->interfaceindex = 0;
1955 } else if (revisionid == 1) {
1956 RT_TRACE(COMP_INIT, DBG_LOUD,
1957 ("Find 92DE MAC1.\n"));
1958 rtlhal->interfaceindex = 1;
1961 RT_TRACE(COMP_INIT, DBG_LOUD, ("Unknown device - "
1962 "VendorID/DeviceID=%x/%x, Revision=%x\n",
1963 venderid, deviceid, revisionid));
1964 rtlhal->interfaceindex = 0;
1968 /* 92ee use new trx flow */
1969 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
1970 rtlpriv->use_new_trx_flow = true;
1972 rtlpriv->use_new_trx_flow = false;
/* Record our own PCI location. */
1975 pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1976 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1977 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1979 /*find bridge info */
1980 pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
1981 /* some ARM have no bridge_pdev and will crash here
1982 * so we should check if bridge_pdev is NULL */
1984 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1985 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1986 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1987 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1988 RT_TRACE(COMP_INIT, DBG_DMESG,
1989 ("Pci Bridge Vendor is found index: %d\n",
/* Known bridge: capture its location and PCIe capability offset for
 * the ASPM/link-control code. */
1996 if (pcipriv->ndis_adapter.pcibridge_vendor !=
1997 PCI_BRIDGE_VENDOR_UNKNOWN) {
1998 pcipriv->ndis_adapter.pcibridge_busnum =
1999 bridge_pdev->bus->number;
2000 pcipriv->ndis_adapter.pcibridge_devnum =
2001 PCI_SLOT(bridge_pdev->devfn);
2002 pcipriv->ndis_adapter.pcibridge_funcnum =
2003 PCI_FUNC(bridge_pdev->devfn);
2004 pcipriv->ndis_adapter.pcicfg_addrport =
2005 (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
2006 (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
2007 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
2008 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
2009 pci_pcie_cap(bridge_pdev);
2010 pcipriv->ndis_adapter.num4bytes =
2011 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
2013 rtl_pci_get_linkcontrol_field(hw);
2015 if (pcipriv->ndis_adapter.pcibridge_vendor ==
2016 PCI_BRIDGE_VENDOR_AMD) {
2017 pcipriv->ndis_adapter.amd_l1_patch =
2018 rtl_pci_get_amd_l1_patch(hw);
2022 RT_TRACE(COMP_INIT, DBG_DMESG,
2023 ("pcidev busnumber:devnumber:funcnumber:"
2024 "vendor:link_ctl %d:%d:%d:%x:%x\n",
2025 pcipriv->ndis_adapter.busnumber,
2026 pcipriv->ndis_adapter.devnumber,
2027 pcipriv->ndis_adapter.funcnumber,
2028 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
2030 RT_TRACE(COMP_INIT, DBG_DMESG,
2031 ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
2032 "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
2033 pcipriv->ndis_adapter.pcibridge_busnum,
2034 pcipriv->ndis_adapter.pcibridge_devnum,
2035 pcipriv->ndis_adapter.pcibridge_funcnum,
2036 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
2037 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
2038 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
2039 pcipriv->ndis_adapter.amd_l1_patch));
2041 rtl_pci_parse_configuration(pdev, hw);
2042 list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
/*
 * rtl_pci_intr_mode_msi - enable MSI and install the interrupt handler.
 *
 * On request_irq failure the MSI enable is rolled back; on success
 * using_msi is recorded so teardown can disable MSI again.
 * NOTE(review): the 'ret' declaration and error-return lines are elided
 * in this excerpt.
 */
2046 static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
2048 struct rtl_priv *rtlpriv = rtl_priv(hw);
2049 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2050 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2052 ret = pci_enable_msi(rtlpci->pdev);
2056 ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
2057 IRQF_SHARED, KBUILD_MODNAME, hw);
/* Undo MSI enable if the handler could not be installed. */
2059 pci_disable_msi(rtlpci->pdev);
2063 rtlpci->using_msi = true;
2065 RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
/*
 * rtl_pci_intr_mode_legacy - install the handler on the legacy (pin-based)
 * interrupt line and record that MSI is not in use.
 * NOTE(review): the 'ret' declaration and return lines are elided in this
 * excerpt.
 */
2069 static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
2071 struct rtl_priv *rtlpriv = rtl_priv(hw);
2072 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2073 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2076 ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
2077 IRQF_SHARED, KBUILD_MODNAME, hw);
2082 rtlpci->using_msi = false;
2083 RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
2084 ("Pin-based Interrupt Mode!\n"));
/*
 * rtl_pci_intr_mode_decide - pick the interrupt mode.
 *
 * Tries MSI when the chip config supports it, falling back to the legacy
 * pin-based mode on failure; otherwise goes straight to legacy mode.
 */
2088 static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
2090 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2091 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2093 if (rtlpci->msi_support == true) {
2094 ret = rtl_pci_intr_mode_msi(hw);
2096 ret = rtl_pci_intr_mode_legacy(hw);
2098 ret = rtl_pci_intr_mode_legacy(hw);
/* Global hw pointer, set at probe time, so other modules can retrieve
 * it via rtl_pci_get_hw_pointer(). */
2105 struct ieee80211_hw *hw_export = NULL;
/*
 * rtl_pci_probe - PCI probe entry point.
 *
 * Sequence: enable the device and 32-bit DMA masks, allocate the
 * ieee80211_hw with rtl_priv/rtl_pci_priv private space, wire up cfg and
 * intf_ops, map the BAR given by cfg->bar_id, poke a few PCI config bytes
 * (clock request / latency / command), identify the chip via
 * _rtl_pci_find_adapter(), read the EEPROM and software vars, init ASPM,
 * mac80211 core, the PCI rings, register with mac80211, create sysfs/proc/
 * rfkill entries, and finally install the IRQ handler.  The error paths
 * at the bottom unwind in reverse order.
 * NOTE(review): goto labels, 'err' checks, and several argument lines are
 * elided in this excerpt.
 */
2107 int rtl_pci_probe(struct pci_dev *pdev,
2108 const struct pci_device_id *id)
2110 struct ieee80211_hw *hw = NULL;
2112 struct rtl_priv *rtlpriv = NULL;
2113 struct rtl_pci_priv *pcipriv = NULL;
2114 struct rtl_pci *rtlpci;
2115 unsigned long pmem_start, pmem_len, pmem_flags;
2119 err = pci_enable_device(pdev);
2122 ("%s : Cannot enable new PCI device\n",
/* The device only does 32-bit DMA; both masks must succeed. */
2127 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
2128 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2129 RT_ASSERT(false, ("Unable to obtain 32bit DMA "
2130 "for consistent allocations\n"));
2131 pci_disable_device(pdev);
2136 pci_set_master(pdev);
2138 hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
2139 sizeof(struct rtl_priv), &rtl_ops);
2142 ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
2148 SET_IEEE80211_DEV(hw, &pdev->dev);
2149 pci_set_drvdata(pdev, hw);
2152 pcipriv = (void *)rtlpriv->priv;
2153 pcipriv->dev.pdev = pdev;
2155 /* init cfg & intf_ops */
2156 rtlpriv->rtlhal.interface = INTF_PCI;
2157 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
2158 rtlpriv->intf_ops = &rtl_pci_ops;
2159 rtlpriv->glb_var = &global_var;
2162 *init dbgp flags before all
2163 *other functions, because we will
2164 *use it in other functions like
2165 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
2166 *you can not use these macro
2169 rtl_dbgp_flag_init(hw);
2172 err = pci_request_regions(pdev, KBUILD_MODNAME);
2174 RT_ASSERT(false, ("Can't obtain PCI resources\n"));
/* Map the chip's register BAR (index comes from the chip config). */
2178 pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
2179 pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
2180 pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
2182 /*shared mem start */
2183 rtlpriv->io.pci_mem_start =
2185 rtlpriv->cfg->bar_id, pmem_len);
2186 if (rtlpriv->io.pci_mem_start == NULL) {
2187 RT_ASSERT(false, ("Can't map PCI mem\n"));
2191 RT_TRACE(COMP_INIT, DBG_DMESG,
2192 ("mem mapped space: start: 0x%08lx len:%08lx "
2193 "flags:%08lx, after map:0x%p\n",
2194 pmem_start, pmem_len, pmem_flags,
2195 rtlpriv->io.pci_mem_start));
2197 /* Disable Clk Request */
2198 pci_write_config_byte(pdev, 0x81, 0);
/* Cache line / command register setup used by this driver. */
2200 pci_write_config_byte(pdev, 0x44, 0);
2201 pci_write_config_byte(pdev, 0x04, 0x06);
2202 pci_write_config_byte(pdev, 0x04, 0x07);
2205 /* if chip not support, will return false */
2206 if(!_rtl_pci_find_adapter(pdev, hw))
2209 /* Init IO handler */
2210 _rtl_pci_io_handler_init(&pdev->dev, hw);
2212 /*like read eeprom and so on */
2213 rtlpriv->cfg->ops->read_eeprom_info(hw);
2215 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2216 RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n"));
2220 rtlpriv->cfg->ops->init_sw_leds(hw);
2223 rtl_pci_init_aspm(hw);
2225 /* Init mac80211 sw */
2226 err = rtl_init_core(hw);
2228 RT_TRACE(COMP_ERR, DBG_EMERG,
2229 ("Can't allocate sw for mac80211.\n"));
2234 err = !rtl_pci_init(hw, pdev);
2236 RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n"));
2240 err = ieee80211_register_hw(hw);
2242 RT_TRACE(COMP_ERR, DBG_EMERG,
2243 ("Can't register mac80211 hw.\n"));
2246 rtlpriv->mac80211.mac80211_registered = 1;
2248 /* the wiphy must have been registed to
2249 * cfg80211 prior to regulatory_hint */
2250 if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
2251 RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
2254 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
2256 RT_TRACE(COMP_ERR, DBG_EMERG,
2257 ("failed to create sysfs device attributes\n"));
2261 rtl_proc_add_one(hw);
2264 rtl_init_rfkill(hw);
2266 rtlpci = rtl_pcidev(pcipriv);
2268 err = rtl_pci_intr_mode_decide(hw);
2270 RT_TRACE(COMP_INIT, DBG_DMESG,
2271 ("%s: failed to register IRQ handler\n",
2272 wiphy_name(hw->wiphy)));
2275 rtlpci->irq_alloc = 1;
2278 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
/* Error unwind (labels elided): undo in reverse order of setup. */
2282 pci_set_drvdata(pdev, NULL);
2283 rtl_deinit_core(hw);
2284 ieee80211_free_hw(hw);
2286 if (rtlpriv->io.pci_mem_start != NULL)
2287 pci_iounmap(pdev, rtlpriv->io.pci_mem_start);
2290 pci_release_regions(pdev);
2294 pci_disable_device(pdev);
2299 /* EXPORT_SYMBOL(rtl_pci_probe); */
/*
 * rtl_pci_get_hw_pointer - accessor used by other modules to obtain the
 * driver's ieee80211_hw (see the hw_export comment above its definition).
 * NOTE(review): the function body is elided in this excerpt.
 */
2301 struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
2305 /* EXPORT_SYMBOL(rtl_pci_get_hw_pointer); */
/*
 * rtl_pci_disconnect - PCI remove entry point; full teardown.
 *
 * Reverse of rtl_pci_probe: clear the interface-start bit, remove sysfs/
 * proc entries, unregister from mac80211 (which invokes ops_stop), stop
 * deferred work and the adapter, tear down rfkill / core / software vars,
 * release the IRQ (and MSI), unmap the BAR, release regions, disable the
 * device and ASPM, and free the ieee80211_hw.
 * NOTE(review): some braces are elided in this excerpt.
 */
2307 void rtl_pci_disconnect(struct pci_dev *pdev)
2309 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2310 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2311 struct rtl_priv *rtlpriv = rtl_priv(hw);
2312 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2313 struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
2315 clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
2317 sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
2320 rtl_proc_remove_one(hw);
2323 /*ieee80211_unregister_hw will call ops_stop */
2324 if (rtlmac->mac80211_registered == 1) {
2325 ieee80211_unregister_hw(hw);
2326 rtlmac->mac80211_registered = 0;
2328 rtl_deinit_deferred_work(hw);
2329 rtlpriv->intf_ops->adapter_stop(hw);
2333 rtl_deinit_rfkill(hw);
2336 rtl_deinit_core(hw);
2337 rtlpriv->cfg->ops->deinit_sw_vars(hw);
/* Only release the IRQ if probe actually installed one. */
2339 if (rtlpci->irq_alloc) {
2340 synchronize_irq(rtlpci->pdev->irq);
2341 free_irq(rtlpci->pdev->irq, hw);
2342 rtlpci->irq_alloc = 0;
2345 if (rtlpci->using_msi == true)
2346 pci_disable_msi(rtlpci->pdev);
2348 list_del(&rtlpriv->list);
2349 if (rtlpriv->io.pci_mem_start != NULL) {
2350 pci_iounmap(pdev, rtlpriv->io.pci_mem_start);
2351 pci_release_regions(pdev);
2354 pci_disable_device(pdev);
2356 rtl_pci_disable_aspm(hw);
2358 pci_set_drvdata(pdev, NULL);
2360 ieee80211_free_hw(hw);
2362 /* EXPORT_SYMBOL(rtl_pci_disconnect); */
2364 /***************************************
2365 kernel pci power state define:
2366 PCI_D0 ((pci_power_t __force) 0)
2367 PCI_D1 ((pci_power_t __force) 1)
2368 PCI_D2 ((pci_power_t __force) 2)
2369 PCI_D3hot ((pci_power_t __force) 3)
2370 PCI_D3cold ((pci_power_t __force) 4)
2371 PCI_UNKNOWN ((pci_power_t __force) 5)
2373 This function is called when the system
2374 goes into suspend state. mac80211 will
2375 call rtl_mac_stop() from its own
2376 suspend path first, so there is
2377 no need to call hw_disable here.
2378 ****************************************/
/*
 * rtl_pci_suspend - PM suspend hook: let the chip-specific code suspend
 * the hardware and tear down rfkill (mac80211 has already stopped the
 * interface; see the comment block above).
 * NOTE(review): the return statement and closing brace are elided in
 * this excerpt.
 */
2379 int rtl_pci_suspend(struct device *dev)
2381 struct pci_dev *pdev = to_pci_dev(dev);
2382 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2383 struct rtl_priv *rtlpriv = rtl_priv(hw);
2385 rtlpriv->cfg->ops->hw_suspend(hw);
2386 rtl_deinit_rfkill(hw);
2390 /* EXPORT_SYMBOL(rtl_pci_suspend); */
/*
 * rtl_pci_resume - PM resume hook: mirror of rtl_pci_suspend; resume the
 * hardware via the chip ops and re-register rfkill.
 * NOTE(review): the return statement and closing brace are elided in
 * this excerpt.
 */
2392 int rtl_pci_resume(struct device *dev)
2394 struct pci_dev *pdev = to_pci_dev(dev);
2395 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2396 struct rtl_priv *rtlpriv = rtl_priv(hw);
2398 rtlpriv->cfg->ops->hw_resume(hw);
2399 rtl_init_rfkill(hw);
2403 /* EXPORT_SYMBOL(rtl_pci_resume); */
2405 struct rtl_intf_ops rtl_pci_ops = {
2406 .read_efuse_byte = read_efuse_byte,
2407 .adapter_start = rtl_pci_start,
2408 .adapter_stop = rtl_pci_stop,
2409 .check_buddy_priv = rtl_pci_check_buddy_priv,
2410 .adapter_tx = rtl_pci_tx,
2411 .flush = rtl_pci_flush,
2412 .reset_trx_ring = rtl_pci_reset_trx_ring,
2413 .waitq_insert = rtl_pci_tx_chk_waitq_insert,
2415 .disable_aspm = rtl_pci_disable_aspm,
2416 .enable_aspm = rtl_pci_enable_aspm,