staging: use pci_zalloc_consistent
[firefly-linux-kernel-4.4.55.git] / drivers / staging / rtl8821ae / pci.c
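The title refers to switching this file's coherent DMA allocations to pci_zalloc_consistent(), which returns memory that is already zeroed and so replaces the older pci_alloc_consistent() + memset() pattern. A minimal standalone sketch of the idiom (the descriptor layout and names below are illustrative, not taken from this driver):

    #include <linux/pci.h>

    /* Illustrative descriptor layout, not the driver's real one. */
    struct my_desc {
            __le32 buf_addr;
            __le32 flags;
    };

    /* Allocate a zeroed, DMA-coherent descriptor ring. */
    static struct my_desc *alloc_ring(struct pci_dev *pdev, int nr_entries,
                                      dma_addr_t *dma)
    {
            /* Old pattern:
             *      ring = pci_alloc_consistent(pdev, size, dma);
             *      if (ring)
             *              memset(ring, 0, size);
             * pci_zalloc_consistent() does the same in one call and
             * returns NULL on failure. */
            return pci_zalloc_consistent(pdev,
                                         sizeof(struct my_desc) * nr_entries,
                                         dma);
    }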
1 /******************************************************************************
2  *
3  * Copyright(c) 2009-2010  Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17  *
18  * The full GNU General Public License is included in this distribution in the
19  * file called LICENSE.
20  *
21  * Contact Information:
22  * wlanfae <wlanfae@realtek.com>
23  * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24  * Hsinchu 300, Taiwan.
25  *
26  * Larry Finger <Larry.Finger@lwfinger.net>
27  *
28  *****************************************************************************/
29
30 #include "core.h"
31 #include "wifi.h"
32 #include "pci.h"
33 #include "base.h"
34 #include "ps.h"
35 #include "efuse.h"
36 #include <linux/export.h>
37
38 static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
39         INTEL_VENDOR_ID,
40         ATI_VENDOR_ID,
41         AMD_VENDOR_ID,
42         SIS_VENDOR_ID
43 };
44
45 static const u8 ac_to_hwq[] = {
46         VO_QUEUE,
47         VI_QUEUE,
48         BE_QUEUE,
49         BK_QUEUE
50 };
51
52 u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
53                 struct sk_buff *skb)
54 {
55         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
56         u16 fc = rtl_get_fc(skb);
57         u8 queue_index = skb_get_queue_mapping(skb);
58
59         if (unlikely(ieee80211_is_beacon(fc)))
60                 return BEACON_QUEUE;
61         if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
62                 return MGNT_QUEUE;
63         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
64                 if (ieee80211_is_nullfunc(fc))
65                         return HIGH_QUEUE;
66
67         return ac_to_hwq[queue_index];
68 }
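/*
 * For reference, a minimal sketch (illustrative only, not driver code) of how
 * the mapping above resolves for an ordinary best-effort QoS data frame,
 * where mac80211's queue mapping returns AC index 2 (BE):
 *
 *	u8 hwq_for_be_data(struct ieee80211_hw *hw, struct sk_buff *skb)
 *	{
 *		// not a beacon, not mgmt/ctl, not a null frame, so:
 *		return ac_to_hwq[skb_get_queue_mapping(skb)]; // == BE_QUEUE
 *	}
 */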
69
70 /* Update PCI dependent default settings*/
71 static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
72 {
73         struct rtl_priv *rtlpriv = rtl_priv(hw);
74         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
75         struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
76         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
77         u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
78         u8 init_aspm;
79
80         ppsc->reg_rfps_level = 0;
81         ppsc->b_support_aspm = 0;
82
83         /*Update PCI ASPM setting */
84         ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
85         switch (rtlpci->const_pci_aspm) {
86         case 0:
87                 /*No ASPM */
88                 break;
89
90         case 1:
91                 /*ASPM dynamically enabled/disabled. */
92                 ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
93                 break;
94
95         case 2:
96                 /*ASPM with Clock Req dynamically enabled/disabled. */
97                 ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
98                                          RT_RF_OFF_LEVL_CLK_REQ);
99                 break;
100
101         case 3:
102                 /*
103                  * Always enable ASPM and Clock Req
104                  * from initialization to halt.
105                  * */
106                 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
107                 ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
108                                          RT_RF_OFF_LEVL_CLK_REQ);
109                 break;
110
111         case 4:
112                 /*
113                  * Always enable ASPM without Clock Req
114                  * from initialization to halt.
115                  * */
116                 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
117                                           RT_RF_OFF_LEVL_CLK_REQ);
118                 ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
119                 break;
120         }
121
122         ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
123
124         /*Update Radio OFF setting */
125         switch (rtlpci->const_hwsw_rfoff_d3) {
126         case 1:
127                 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
128                         ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
129                 break;
130
131         case 2:
132                 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
133                         ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
134                 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
135                 break;
136
137         case 3:
138                 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
139                 break;
140         }
141
142         /*Set HW definition to determine if it supports ASPM. */
143         switch (rtlpci->const_support_pciaspm) {
144         case 0:{
145                         /*ASPM not supported. */
146                         bool b_support_aspm = false;
147                         ppsc->b_support_aspm = b_support_aspm;
148                         break;
149                 }
150         case 1:{
151                         /*Support ASPM. */
152                         bool b_support_aspm = true;
153                         bool b_support_backdoor = true;
154                         ppsc->b_support_aspm = b_support_aspm;
155
156                         /*if(priv->oem_id == RT_CID_TOSHIBA &&
157                            !priv->ndis_adapter.amd_l1_patch)
158                            b_support_backdoor = false; */
159
160                         ppsc->b_support_backdoor = b_support_backdoor;
161
162                         break;
163                 }
164         case 2:
165                 /*ASPM value set by chipset. */
166                 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
167                         bool b_support_aspm = true;
168                         ppsc->b_support_aspm = b_support_aspm;
169                 }
170                 break;
171         default:
172                 RT_TRACE(COMP_ERR, DBG_EMERG,
173                          ("switch case not processed\n"));
174                 break;
175         }
176
177         /* Toshiba ASPM issue: Toshiba platforms set ASPM by themselves,
178          * so we should not set ASPM in the driver */
179         pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
180         if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
181                 init_aspm == 0x43)
182                 ppsc->b_support_aspm = false;
183 }
184
185 static bool _rtl_pci_platform_switch_device_pci_aspm(struct ieee80211_hw *hw,
186                                                      u8 value)
187 {
188         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
189         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
190         bool bresult = false;
191
192         if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
193                 value |= 0x40;
194
195         pci_write_config_byte(rtlpci->pdev, 0x80, value);
196
197         return bresult;
198 }
199
200 /*Set 0x01 to enable the clock request, 0x00 to disable it. */
201 static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
202 {
203         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
204         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
205         bool bresult = false;
206
207         pci_write_config_byte(rtlpci->pdev, 0x81, value);
208         bresult = true;
209
210         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
211                 udelay(100);
212
213         return bresult;
214 }
215
216 /*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
217 static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
218 {
219         struct rtl_priv *rtlpriv = rtl_priv(hw);
220         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
221         struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
222         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
223         u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
224         u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
225         u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
226         /*Retrieve original configuration settings. */
227         u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
228         u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
229                                 pcibridge_linkctrlreg;
230         u16 aspmlevel = 0;
231
232         if (!ppsc->b_support_aspm)
233                 return;
234
235         if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
236                 RT_TRACE(COMP_POWER, DBG_TRACE,
237                          ("PCI(Bridge) UNKNOWN.\n"));
238
239                 return;
240         }
241
242         if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
243                 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
244                 _rtl_pci_switch_clk_req(hw, 0x0);
245         }
246
247         if (1) {
248                 /* to ensure the device will be in the L0 state after an I/O access */
249                 u8 tmp_u1b;
250                 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
251         }
252
253         /*Set corresponding value. */
254         aspmlevel |= BIT(0) | BIT(1);
255         linkctrl_reg &= ~aspmlevel;
256         pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
257
258         _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
259         udelay(50);
260
261         /*4 Disable Pci Bridge ASPM */
262         rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
263                                      pcicfg_addrport + (num4bytes << 2));
264         rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
265
266         udelay(50);
267
268 }
269
270 /*
271  * Enable RTL8192SE ASPM & PCI bridge ASPM for power saving.
272  * We must follow this sequence: enable ASPM on the RTL8192SE
273  * first, then on the PCI bridge, or the system will show a
274  * bluescreen.
275  */
276 static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
277 {
278         struct rtl_priv *rtlpriv = rtl_priv(hw);
279         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
280         struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
281         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
282         u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
283         u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
284         u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
285         u16 aspmlevel;
286         u8 u_pcibridge_aspmsetting;
287         u8 u_device_aspmsetting;
288
289         if (!ppsc->b_support_aspm)
290                 return;
291
292         if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
293                 RT_TRACE(COMP_POWER, DBG_TRACE,
294                          ("PCI(Bridge) UNKNOWN.\n"));
295                 return;
296         }
297
298         /*4 Enable Pci Bridge ASPM */
299         rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
300                                      pcicfg_addrport + (num4bytes << 2));
301
302         u_pcibridge_aspmsetting =
303             pcipriv->ndis_adapter.pcibridge_linkctrlreg |
304             rtlpci->const_hostpci_aspm_setting;
305
306         if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
307                 u_pcibridge_aspmsetting &= ~BIT(0);
308
309         rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
310
311         RT_TRACE(COMP_INIT, DBG_LOUD,
312                  ("PlatformEnableASPM(): Write reg[%x] = %x\n",
313                   (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
314                   u_pcibridge_aspmsetting));
315
316         udelay(50);
317
318         /*Get ASPM level (with/without Clock Req) */
319         aspmlevel = rtlpci->const_devicepci_aspm_setting;
320         u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
321
322         /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
323         /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
324
325         u_device_aspmsetting |= aspmlevel;
326
327         _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
328
329         if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
330                 _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
331                                              RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
332                 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
333         }
334         udelay(100);
335 }
336
337 static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
338 {
339         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
340         u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
341
342         bool status = false;
343         u8 offset_e0;
344         unsigned offset_e4;
345
346         rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
347                         pcicfg_addrport + 0xE0);
348         rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
349
350         rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
351                         pcicfg_addrport + 0xE0);
352         rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
353
354         if (offset_e0 == 0xA0) {
355                 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
356                                              pcicfg_addrport + 0xE4);
357                 rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
358                 if (offset_e4 & BIT(23))
359                         status = true;
360         }
361
362         return status;
363 }
364
365 bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
366                               struct rtl_priv **buddy_priv)
367 {
368         struct rtl_priv *rtlpriv = rtl_priv(hw);
369         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
370         bool b_find_buddy_priv = false;
371         struct rtl_priv *temp_priv = NULL;
372         struct rtl_pci_priv *temp_pcipriv = NULL;
373
374         if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
375                 list_for_each_entry(temp_priv, &rtlpriv->glb_var->glb_priv_list,
376                         list) {
377                         if (temp_priv) {
378                                 temp_pcipriv =
379                                         (struct rtl_pci_priv *)temp_priv->priv;
380                                 RT_TRACE(COMP_INIT, DBG_LOUD,
381                                         (("pcipriv->ndis_adapter.funcnumber %x \n"),
382                                         pcipriv->ndis_adapter.funcnumber));
383                                 RT_TRACE(COMP_INIT, DBG_LOUD,
384                                         (("temp_pcipriv->ndis_adapter.funcnumber %x \n"),
385                                         temp_pcipriv->ndis_adapter.funcnumber));
386
387                                 if ((pcipriv->ndis_adapter.busnumber ==
388                                         temp_pcipriv->ndis_adapter.busnumber) &&
389                                     (pcipriv->ndis_adapter.devnumber ==
390                                     temp_pcipriv->ndis_adapter.devnumber) &&
391                                     (pcipriv->ndis_adapter.funcnumber !=
392                                     temp_pcipriv->ndis_adapter.funcnumber)) {
393                                         b_find_buddy_priv = true;
394                                         break;
395                                 }
396                         }
397                 }
398         }
399
400         RT_TRACE(COMP_INIT, DBG_LOUD,
401                 (("b_find_buddy_priv %d \n"), b_find_buddy_priv));
402
403         if (b_find_buddy_priv)
404                 *buddy_priv = temp_priv;
405
406         return b_find_buddy_priv;
407 }
408
409 void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
410 {
411         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
412         u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
413         u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
414         u8 linkctrl_reg;
415         u8 num4bbytes;
416
417         num4bbytes = (capabilityoffset + 0x10) / 4;
418
419         /*Read  Link Control Register */
420         rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
421                                      pcicfg_addrport + (num4bbytes << 2));
422         rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
423
424         pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
425 }
426
427 static void rtl_pci_parse_configuration(struct pci_dev *pdev,
428                                         struct ieee80211_hw *hw)
429 {
430         struct rtl_priv *rtlpriv = rtl_priv(hw);
431         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
432
433         u8 tmp;
434         int pos;
435         u8 linkctrl_reg;
436
437         /*Link Control Register */
438         pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
439         pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
440         pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
441
442         RT_TRACE(COMP_INIT, DBG_TRACE,
443                  ("Link Control Register =%x\n",
444                   pcipriv->ndis_adapter.linkctrl_reg));
445
446         pci_read_config_byte(pdev, 0x98, &tmp);
447         tmp |= BIT(4);
448         pci_write_config_byte(pdev, 0x98, tmp);
449
450         tmp = 0x17;
451         pci_write_config_byte(pdev, 0x70f, tmp);
452 }
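/*
 * For reference, kernels since v3.7 provide pcie_capability_read_word(),
 * which hides the capability-offset lookup done above with
 * pci_find_capability().  A minimal sketch of that alternative (not what
 * this driver does):
 *
 *	static u16 read_lnkctl(struct pci_dev *pdev)
 *	{
 *		u16 lnkctl = 0;
 *
 *		// returns 0 on success; lnkctl stays 0 if the device
 *		// has no PCIe capability
 *		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
 *		return lnkctl;
 *	}
 */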
453
454 static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
455 {
456         struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
457
458         _rtl_pci_update_default_setting(hw);
459
460         if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
461                 /*Always enable ASPM & Clock Req. */
462                 rtl_pci_enable_aspm(hw);
463                 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
464         }
465
466 }
467
468 static void _rtl_pci_io_handler_init(struct device *dev,
469                                      struct ieee80211_hw *hw)
470 {
471         struct rtl_priv *rtlpriv = rtl_priv(hw);
472
473         rtlpriv->io.dev = dev;
474
475         rtlpriv->io.write8_async = pci_write8_async;
476         rtlpriv->io.write16_async = pci_write16_async;
477         rtlpriv->io.write32_async = pci_write32_async;
478
479         rtlpriv->io.read8_sync = pci_read8_sync;
480         rtlpriv->io.read16_sync = pci_read16_sync;
481         rtlpriv->io.read32_sync = pci_read32_sync;
482
483 }
484
485 static bool _rtl_pci_update_earlymode_info(struct ieee80211_hw *hw,
486                                            struct sk_buff *skb,
487                                            struct rtl_tcb_desc *tcb_desc,
488                                            u8 tid)
489 {
490         struct rtl_priv *rtlpriv = rtl_priv(hw);
491         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
492         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
493         u8 additionlen = FCS_LEN;
494         struct sk_buff *next_skb;
495
496         /* additional length: open is 4 (FCS only), wep/tkip is 8, aes is 12 */
497         if (info->control.hw_key)
498                 additionlen += info->control.hw_key->icv_len;
499
500         /* The maximum number of aggregated skbs is 6 */
501         tcb_desc->empkt_num = 0;
502         spin_lock_bh(&rtlpriv->locks.waitq_lock);
503         skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
504                 struct ieee80211_tx_info *next_info =
505                                         IEEE80211_SKB_CB(next_skb);
506                 if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
507                         tcb_desc->empkt_len[tcb_desc->empkt_num] =
508                                 next_skb->len + additionlen;
509                         tcb_desc->empkt_num++;
510                 } else {
511                         break;
512                 }
513
514                 if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
515                                       next_skb))
516                         break;
517
518                 if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
519                         break;
520         }
521         spin_unlock_bh(&rtlpriv->locks.waitq_lock);
522         return true;
523 }
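/*
 * Worked example of the lengths recorded above (illustrative only): for a
 * 1400-byte skb protected with CCMP (icv_len == 8), the value stored in
 * empkt_len[] is 1400 + FCS_LEN(4) + 8 = 1412, matching the
 * "open is 4, wep/tkip is 8, aes is 12" note.
 */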
524
525 /* just for early mode now */
526 static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
527 {
528         struct rtl_priv *rtlpriv = rtl_priv(hw);
529         struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
530         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
531         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
532         struct sk_buff *skb = NULL;
533         struct ieee80211_tx_info *info = NULL;
534         int tid; /* must be signed for the countdown loop below */
535
536         if (!rtlpriv->rtlhal.b_earlymode_enable)
537                 return;
538         if (rtlpriv->dm.supp_phymode_switch &&
539                 (rtlpriv->easy_concurrent_ctl.bswitch_in_process ||
540                 (rtlpriv->buddy_priv &&
541                  rtlpriv->buddy_priv->easy_concurrent_ctl.bswitch_in_process)))
542                 return;
543         /* we only use early mode for BE/BK/VI/VO */
544         for (tid = 7; tid >= 0; tid--) {
545                 u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
546                 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
547                 while (!mac->act_scanning &&
548                        rtlpriv->psc.rfpwr_state == ERFON) {
549                         struct rtl_tcb_desc tcb_desc;
550                         memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
551
552                         spin_lock_bh(&rtlpriv->locks.waitq_lock);
553                         if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
554                             (ring->entries - skb_queue_len(&ring->queue) >
555                              rtlhal->max_earlymode_num)) {
556                                 skb = skb_dequeue(&mac->skb_waitq[tid]);
557                         } else {
558                                 spin_unlock_bh(&rtlpriv->locks.waitq_lock);
559                                 break;
560                         }
561                         spin_unlock_bh(&rtlpriv->locks.waitq_lock);
562
563                         /* Frames to some MAC addresses can't use early mode,
564                          * e.g. multicast/broadcast/non-QoS data */
565                         info = IEEE80211_SKB_CB(skb);
566                         if (info->flags & IEEE80211_TX_CTL_AMPDU)
567                                 _rtl_pci_update_earlymode_info(hw, skb,
568                                                                &tcb_desc, tid);
569
570                         rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
571                 }
572         }
573 }
574
575 static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
576 {
577         struct rtl_priv *rtlpriv = rtl_priv(hw);
578         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
579         struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
580
581         while (skb_queue_len(&ring->queue)) {
582                 struct sk_buff *skb;
583                 struct ieee80211_tx_info *info;
584                 u16 fc;
585                 u8 tid;
586                 u8 *entry;
587
588
589                 if (rtlpriv->use_new_trx_flow)
590                         entry = (u8 *)(&ring->buffer_desc[ring->idx]);
591                 else
592                         entry = (u8 *)(&ring->desc[ring->idx]);
593
594                 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
595                         return;
596
597                 ring->idx = (ring->idx + 1) % ring->entries;
598
599                 skb = __skb_dequeue(&ring->queue);
600
601                 pci_unmap_single(rtlpci->pdev,
602                                  le32_to_cpu(rtlpriv->cfg->ops->
603                                              get_desc((u8 *) entry, true,
604                                                       HW_DESC_TXBUFF_ADDR)),
605                                  skb->len, PCI_DMA_TODEVICE);
606
607                 /* remove early mode header */
608                 if(rtlpriv->rtlhal.b_earlymode_enable)
609                         skb_pull(skb, EM_HDR_LEN);
610
611                 RT_TRACE((COMP_INTR | COMP_SEND), DBG_TRACE,
612                          ("new ring->idx:%d, "
613                           "free: skb_queue_len:%d, free: seq:%d\n",
614                           ring->idx,
615                           skb_queue_len(&ring->queue),
616                           *(u16 *) (skb->data + 22)));
617
618                 if(prio == TXCMD_QUEUE) {
619                         dev_kfree_skb(skb);
620                         goto tx_status_ok;
621
622                 }
623
624                 /* for sw LPS, only after the NULL frame has been sent out
625                  * can we be sure the AP knows we are asleep; otherwise we
626                  * should not let the rf go to sleep */
627                 fc = rtl_get_fc(skb);
628                 if (ieee80211_is_nullfunc(fc)) {
629                         if(ieee80211_has_pm(fc)) {
630                                 rtlpriv->mac80211.offchan_deley = true;
631                                 rtlpriv->psc.state_inap = 1;
632                         } else {
633                                 rtlpriv->psc.state_inap = 0;
634                         }
635                 }
636                 if (ieee80211_is_action(fc)) {
637                         struct ieee80211_mgmt_compat *action_frame =
638                                 (struct ieee80211_mgmt_compat *)skb->data;
639                         if (action_frame->u.action.u.ht_smps.action ==
640                                 WLAN_HT_ACTION_SMPS) {
641                                 dev_kfree_skb(skb);
642                                 goto tx_status_ok;
643                         }
644                 }
645
646                 /* update tid tx pkt num */
647                 tid = rtl_get_tid(skb);
648                 if (tid <= 7)
649                         rtlpriv->link_info.tidtx_inperiod[tid]++;
650
651                 info = IEEE80211_SKB_CB(skb);
652                 ieee80211_tx_info_clear_status(info);
653
654                 info->flags |= IEEE80211_TX_STAT_ACK;
655                 /*info->status.rates[0].count = 1; */
656
657                 ieee80211_tx_status_irqsafe(hw, skb);
658
659                 if ((ring->entries - skb_queue_len(&ring->queue))
660                                 == 2) {
661
662                         RT_TRACE(COMP_ERR, DBG_LOUD,
663                                         ("more desc left, wake "
664                                          "skb_queue@%d, ring->idx = %d, "
665                                          "skb_queue_len = %d\n",
666                                          prio, ring->idx,
667                                          skb_queue_len(&ring->queue)));
668
669                         ieee80211_wake_queue(hw,
670                                         skb_get_queue_mapping
671                                         (skb));
672                 }
673 tx_status_ok:
674                 skb = NULL;
675         }
676
677         if (((rtlpriv->link_info.num_rx_inperiod +
678                 rtlpriv->link_info.num_tx_inperiod) > 8) ||
679                 (rtlpriv->link_info.num_rx_inperiod > 2)) {
680                 rtl_lps_leave(hw);
681         }
682 }
683
684 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
685         u8 *entry, int rxring_idx, int desc_idx)
686 {
687         struct rtl_priv *rtlpriv = rtl_priv(hw);
688         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
689         u32 bufferaddress;
690         u8 tmp_one = 1;
691         struct sk_buff *skb;
692
693         skb = dev_alloc_skb(rtlpci->rxbuffersize);
694         if (!skb)
695                 return 0;
696         rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
697
698         /* just set skb->cb to mapping addr
699          * for pci_unmap_single use */
700         *((dma_addr_t *) skb->cb) = pci_map_single(rtlpci->pdev,
701                                 skb_tail_pointer(skb), rtlpci->rxbuffersize,
702                                 PCI_DMA_FROMDEVICE);
703         bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
704         if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
705                 return 0;
706         if (rtlpriv->use_new_trx_flow) {
707                 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
708                                             HW_DESC_RX_PREPARE,
709                                             (u8 *) & bufferaddress);
710         } else {
711                 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
712                                             HW_DESC_RXBUFF_ADDR,
713                                             (u8 *) & bufferaddress);
714                 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
715                                             HW_DESC_RXPKT_LEN,
716                                             (u8 *) & rtlpci->rxbuffersize);
717                 rtlpriv->cfg->ops->set_desc(hw, (u8 *) entry, false,
718                                             HW_DESC_RXOWN,
719                                             (u8 *) & tmp_one);
720         }
721
722         return 1;
723 }
724
725 /* In order to receive 8K A-MSDUs we set the skb size to
726  * 9100 bytes when initializing the rx ring, but if a packet
727  * is not an A-MSDU, such an oversized skb passed straight to
728  * TCP/IP makes large-packet pings fail (e.g. "ping -s 65507").
729  * So here we reallocate the skb based on the true packet
730  * size; it would be better if mac80211 did this, but it
731  * doesn't yet. */
732
733 /* On some platforms the skb allocation may occasionally fail.
734  * In that case we hand the old skb to mac80211 directly; this
735  * causes no other issues, the packet is simply dropped by
736  * TCP/IP. */
737 static void _rtl_pci_rx_to_mac80211(struct ieee80211_hw *hw,
738         struct sk_buff *skb, struct ieee80211_rx_status rx_status)
739 {
740         if (unlikely(!rtl_action_proc(hw, skb, false))) {
741                 dev_kfree_skb_any(skb);
742         } else {
743                 struct sk_buff *uskb = NULL;
744                 u8 *pdata;
745
746                 uskb = dev_alloc_skb(skb->len + 128);
747                 if (likely(uskb)) {
748                         memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status,
749                                         sizeof(rx_status));
750                         pdata = (u8 *)skb_put(uskb, skb->len);
751                         memcpy(pdata, skb->data, skb->len);
752                         dev_kfree_skb_any(skb);
753
754                         ieee80211_rx_irqsafe(hw, uskb);
755                 } else {
756                         ieee80211_rx_irqsafe(hw, skb);
757                 }
758         }
759 }
760
761 /*hsisr interrupt handler*/
762 static void _rtl_pci_hs_interrupt(struct ieee80211_hw *hw)
763 {
764         struct rtl_priv *rtlpriv = rtl_priv(hw);
765         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
766
767         rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR],
768                        rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[MAC_HSISR]) |
769                        rtlpci->sys_irq_mask);
770
771
772 }
773 static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
774 {
775         struct rtl_priv *rtlpriv = rtl_priv(hw);
776         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
777         int rxring_idx = RTL_PCI_RX_MPDU_QUEUE;
778
779         struct ieee80211_rx_status rx_status = { 0 };
780         unsigned int count = rtlpci->rxringcount;
781         bool unicast = false;
782         u8 hw_queue = 0;
783         unsigned int rx_remained_cnt;
784         u8 own;
785         u8 tmp_one;
786
787         struct rtl_stats status = {
788                 .signal = 0,
789                 .noise = -98,
790                 .rate = 0,
791         };
792
793         /*RX NORMAL PKT */
794         while (count--) {
795                 struct ieee80211_hdr *hdr;
796                 u16 fc;
797                 u16 len;
798                 /*rx buffer descriptor */
799                 struct rtl_rx_buffer_desc *buffer_desc = NULL;
800                 /*with the new trx flow, this points to the rx wifi info */
801                 struct rtl_rx_desc *pdesc = NULL;
802                 /*rx pkt */
803                 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
804                                         rtlpci->rx_ring[rxring_idx].idx];
805
806                 if (rtlpriv->use_new_trx_flow) {
807                         rx_remained_cnt =
808                                 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
809                                                                       hw_queue);
810                         if (rx_remained_cnt < 1)
811                                 return;
812
813                 } else {        /* rx descriptor */
814                         pdesc = &rtlpci->rx_ring[rxring_idx].desc[
815                                 rtlpci->rx_ring[rxring_idx].idx];
816
817                         own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
818                                                                false,
819                                                                HW_DESC_OWN);
820                         if (own) /* wait data to be filled by hardware */
821                                 return;
822                 }
823
824                 /* Getting here means the data has already been filled */
825                 /* Attention!
826                  * We can NOT access 'skb' before 'pci_unmap_single' */
827                 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
828                                  rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
829
830                 if (rtlpriv->use_new_trx_flow) {
831                         buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
832                                 rtlpci->rx_ring[rxring_idx].idx];
833                         /*means rx wifi info*/
834                         pdesc = (struct rtl_rx_desc *)skb->data;
835                 }
836
837                 rtlpriv->cfg->ops->query_rx_desc(hw, &status,
838                                                  &rx_status, (u8 *) pdesc, skb);
839
840                 if (rtlpriv->use_new_trx_flow)
841                         rtlpriv->cfg->ops->rx_check_dma_ok(hw,
842                                                            (u8 *)buffer_desc,
843                                                            hw_queue);
844
845
846                 len = rtlpriv->cfg->ops->get_desc((u8 *)pdesc, false,
847                                                   HW_DESC_RXPKT_LEN);
848
849                 if (skb->end - skb->tail > len) {
850                         skb_put(skb, len);
851                         if (rtlpriv->use_new_trx_flow)
852                                 skb_reserve(skb, status.rx_drvinfo_size +
853                                                  status.rx_bufshift + 24);
854                         else
855                                 skb_reserve(skb, status.rx_drvinfo_size +
856                                                  status.rx_bufshift);
857
858                 } else {
859                         printk(KERN_WARNING "skb->end - skb->tail = %d, len is %d\n",
860                                skb->end - skb->tail, len);
861                         break;
862                 }
863
864                 rtlpriv->cfg->ops->rx_command_packet_handler(hw, &status, skb);
865
866                 /*
867                  * NOTICE: do not strip the FCS here with
868                  * skb_trim(skb, skb->len - 4);
869                  * mac80211 already does that, and doing it here
870                  * makes DHCP fail when encryption is used.
871                  */
872
873                 hdr = rtl_get_hdr(skb);
874                 fc = rtl_get_fc(skb);
875
876                 if (!status.b_crc && !status.b_hwerror) {
877                         memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
878                                sizeof(rx_status));
879
880                         if (is_broadcast_ether_addr(hdr->addr1)) {
881                                 ;/*TODO*/
882                         } else if (is_multicast_ether_addr(hdr->addr1)) {
883                                 ;/*TODO*/
884                         } else {
885                                 unicast = true;
886                                 rtlpriv->stats.rxbytesunicast += skb->len;
887                         }
888
889                         rtl_is_special_data(hw, skb, false);
890
891                         if (ieee80211_is_data(fc)) {
892                                 rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
893
894                                 if (unicast)
895                                         rtlpriv->link_info.num_rx_inperiod++;
896                         }
897
898                         /* beacon statistics for roaming */
899                         rtl_beacon_statistic(hw, skb);
900                         rtl_p2p_info(hw, (void*)skb->data, skb->len);
901                         /* for sw lps */
902                         rtl_swlps_beacon(hw, (void*)skb->data, skb->len);
903                         rtl_recognize_peer(hw, (void*)skb->data, skb->len);
904                         if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
905                             (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)&&
906                             (ieee80211_is_beacon(fc) ||
907                              ieee80211_is_probe_resp(fc))) {
908                                 dev_kfree_skb_any(skb);
909                         } else {
910                                 _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
911                         }
912                 } else {
913                         dev_kfree_skb_any(skb);
914                 }
915                 if (rtlpriv->use_new_trx_flow) {
916                         rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
917                         rtlpci->rx_ring[hw_queue].next_rx_rp %=
918                                                         RTL_PCI_MAX_RX_COUNT;
919
920
921                         rx_remained_cnt--;
922                         if (1/*rx_remained_cnt == 0*/) {
923                                 rtl_write_word(rtlpriv, 0x3B4,
924                                         rtlpci->rx_ring[hw_queue].next_rx_rp);
925                         }
926                 }
927                 if (((rtlpriv->link_info.num_rx_inperiod +
928                       rtlpriv->link_info.num_tx_inperiod) > 8) ||
929                     (rtlpriv->link_info.num_rx_inperiod > 2)) {
930                         rtl_lps_leave(hw);
931                 }
932
933                 if (rtlpriv->use_new_trx_flow) {
934                         _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
935                                                  rxring_idx,
936                                                rtlpci->rx_ring[rxring_idx].idx);
937                 } else {
938                         _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
939                                                rtlpci->rx_ring[rxring_idx].idx);
940
941                         if (rtlpci->rx_ring[rxring_idx].idx ==
942                             rtlpci->rxringcount - 1)
943                                 rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc,
944                                                             false,
945                                                             HW_DESC_RXERO,
946                                                             (u8 *) & tmp_one);
947                 }
948                 rtlpci->rx_ring[rxring_idx].idx =
949                                 (rtlpci->rx_ring[rxring_idx].idx + 1) %
950                                 rtlpci->rxringcount;
951         }
952 }
953
954 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
955 {
956         struct ieee80211_hw *hw = dev_id;
957         struct rtl_priv *rtlpriv = rtl_priv(hw);
958         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
959         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
960         unsigned long flags;
961         u32 inta = 0;
962         u32 intb = 0;
963
964
965
966         if (rtlpci->irq_enabled == 0)
967                 return IRQ_HANDLED;
968
969         spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,flags);
970
971
972         rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR], 0x0);
973
974
975         rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE], 0x0);
976
977
978         /*read ISR: 4/8bytes */
979         rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
980
981
982         /*Shared IRQ or HW disappeared */
983         if (!inta || inta == 0xffff)
984                 goto done;
985         /*<1> beacon related */
986         if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
987                 RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon ok interrupt!\n"));
988         }
989
990         if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
991                 RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon err interrupt!\n"));
992         }
993
994         if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
995                 RT_TRACE(COMP_INTR, DBG_TRACE, ("beacon interrupt!\n"));
996         }
997
998         if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
999                 RT_TRACE(COMP_INTR, DBG_TRACE,
1000                          ("prepare beacon for interrupt!\n"));
1001                 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
1002         }
1003
1004
1005         /*<2> tx related */
1006         if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
1007                 RT_TRACE(COMP_ERR, DBG_TRACE, ("IMR_TXFOVW!\n"));
1008
1009         if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
1010                 RT_TRACE(COMP_INTR, DBG_TRACE, ("Manage ok interrupt!\n"));
1011                 _rtl_pci_tx_isr(hw, MGNT_QUEUE);
1012         }
1013
1014         if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
1015                 RT_TRACE(COMP_INTR, DBG_TRACE, ("HIGH_QUEUE ok interrupt!\n"));
1016                 _rtl_pci_tx_isr(hw, HIGH_QUEUE);
1017         }
1018
1019         if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
1020                 rtlpriv->link_info.num_tx_inperiod++;
1021
1022                 RT_TRACE(COMP_INTR, DBG_TRACE, ("BK Tx OK interrupt!\n"));
1023                 _rtl_pci_tx_isr(hw, BK_QUEUE);
1024         }
1025
1026         if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
1027                 rtlpriv->link_info.num_tx_inperiod++;
1028
1029                 RT_TRACE(COMP_INTR, DBG_TRACE, ("BE TX OK interrupt!\n"));
1030                 _rtl_pci_tx_isr(hw, BE_QUEUE);
1031         }
1032
1033         if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
1034                 rtlpriv->link_info.num_tx_inperiod++;
1035
1036                 RT_TRACE(COMP_INTR, DBG_TRACE, ("VI TX OK interrupt!\n"));
1037                 _rtl_pci_tx_isr(hw, VI_QUEUE);
1038         }
1039
1040         if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
1041                 rtlpriv->link_info.num_tx_inperiod++;
1042
1043                 RT_TRACE(COMP_INTR, DBG_TRACE, ("Vo TX OK interrupt!\n"));
1044                 _rtl_pci_tx_isr(hw, VO_QUEUE);
1045         }
1046
1047         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
1048                 if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
1049                         rtlpriv->link_info.num_tx_inperiod++;
1050
1051                         RT_TRACE(COMP_INTR, DBG_TRACE,
1052                                  ("CMD TX OK interrupt!\n"));
1053                         _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
1054                 }
1055         }
1056
1057         /*<3> rx related */
1058         if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
1059                 RT_TRACE(COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
1060
1061                 _rtl_pci_rx_interrupt(hw);
1062
1063         }
1064
1065         if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
1066                 RT_TRACE(COMP_ERR, DBG_WARNING,
1067                          ("rx descriptor unavailable!\n"));
1068                 rtl_write_byte(rtlpriv, 0xb4, BIT(1) );
1069                 _rtl_pci_rx_interrupt(hw);
1070         }
1071
1072         if (unlikely(intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
1073                 RT_TRACE(COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
1074                 _rtl_pci_rx_interrupt(hw);
1075         }
1076
1077         /*<4> fw related*/
1078         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
1079                 if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
1080                         RT_TRACE(COMP_INTR, DBG_TRACE,
1081                                  ("firmware interrupt!\n"));
1082                         queue_delayed_work(rtlpriv->works.rtl_wq,
1083                                            &rtlpriv->works.fwevt_wq, 0);
1084                 }
1085         }
1086
1087         /*<5> hsisr related*/
1088         /* Only 8188EE & 8723BE are supported here.
1089          * If other ICs reach this path the system will be corrupted,
1090          * because maps[RTL_IMR_HSISR_IND] & maps[MAC_HSISR]
1091          * are not initialized */
1092         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8188EE ||
1093             rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
1094                 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
1095                         RT_TRACE(COMP_INTR, DBG_TRACE,
1096                                          ("hsisr interrupt!\n"));
1097                         _rtl_pci_hs_interrupt(hw);
1098                 }
1099         }
1100
1101
1102         if(rtlpriv->rtlhal.b_earlymode_enable)
1103                 tasklet_schedule(&rtlpriv->works.irq_tasklet);
1104
1105         rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMR],
1106                         rtlpci->irq_mask[0]);
1107         rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[MAC_HIMRE],
1108                         rtlpci->irq_mask[1]);
1109         spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1110
1111         return IRQ_HANDLED;
1112
1113 done:
1114         spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1115         return IRQ_HANDLED;
1116 }
1117
1118 static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
1119 {
1120         _rtl_pci_tx_chk_waitq(hw);
1121 }
1122
1123 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1124 {
1125         struct rtl_priv *rtlpriv = rtl_priv(hw);
1126         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1127         struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1128         struct rtl8192_tx_ring *ring = NULL;
1129         struct ieee80211_hdr *hdr = NULL;
1130         struct ieee80211_tx_info *info = NULL;
1131         struct sk_buff *pskb = NULL;
1132         struct rtl_tx_desc *pdesc = NULL;
1133         struct rtl_tcb_desc tcb_desc;
1134         /*This is for new trx flow*/
1135         struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
1136         u8 temp_one = 1;
1137
1138         memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1139         ring = &rtlpci->tx_ring[BEACON_QUEUE];
1140         pskb = __skb_dequeue(&ring->queue);
1141         if (pskb)
1142                 kfree_skb(pskb);
1143
1144         /*NB: the beacon data buffer must be 32-bit aligned. */
1145         pskb = ieee80211_beacon_get(hw, mac->vif);
1146         if (pskb == NULL)
1147                 return;
1148         hdr = rtl_get_hdr(pskb);
1149         info = IEEE80211_SKB_CB(pskb);
1150         pdesc = &ring->desc[0];
1151         if (rtlpriv->use_new_trx_flow)
1152                 pbuffer_desc = &ring->buffer_desc[0];
1153
1154         rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
1155                                         (u8 *)pbuffer_desc, info, NULL, pskb,
1156                                         BEACON_QUEUE, &tcb_desc);
1157
1158         __skb_queue_tail(&ring->queue, pskb);
1159
1160         rtlpriv->cfg->ops->set_desc(hw, (u8 *) pdesc, true, HW_DESC_OWN,
1161                                     (u8 *) & temp_one);
1162
1163         return;
1164 }
1165
1166 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
1167 {
1168         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1169         struct rtl_priv *rtlpriv = rtl_priv(hw);
1170         struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1171         u8 i;
1172         u16 desc_num;
1173
1174         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
1175                 desc_num = TX_DESC_NUM_92E;
1176         else
1177                 desc_num = RT_TXDESC_NUM;
1178
1179         for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1180                 rtlpci->txringcount[i] = desc_num;
1181         }
1182         /*
1183          * we only alloc 2 desc for the beacon queue,
1184          * because we just need the first desc for the hw beacon.
1185          */
1186         rtlpci->txringcount[BEACON_QUEUE] = 2;
1187
1188         /*
1189          * The BE queue needs more descriptors for performance;
1190          * otherwise we may run out of tx descriptors,
1191          * which can cause a mac80211 memory leak.
1192          */
1193         if (rtl_priv(hw)->use_new_trx_flow == false)
1194                 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
1195
1196         rtlpci->rxbuffersize = 9100;    /*2048/1024; */
1197         rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT;     /*64; */
1198 }
1199
1200 static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1201                 struct pci_dev *pdev)
1202 {
1203         struct rtl_priv *rtlpriv = rtl_priv(hw);
1204         struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1205         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1206         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1207
1208         rtlpriv->rtlhal.up_first_time = true;
1209         rtlpriv->rtlhal.being_init_adapter = false;
1210
1211         rtlhal->hw = hw;
1212         rtlpci->pdev = pdev;
1213
1214         /*Tx/Rx related var */
1215         _rtl_pci_init_trx_var(hw);
1216
1217         /*IBSS*/ mac->beacon_interval = 100;
1218
1219         /*AMPDU*/
1220         mac->min_space_cfg = 0;
1221         mac->max_mss_density = 0;
1222         /*set sane AMPDU defaults */
1223         mac->current_ampdu_density = 7;
1224         mac->current_ampdu_factor = 3;
1225
1226         /*QOS*/
1227         rtlpci->acm_method = eAcmWay2_SW;
1228
1229         /*task */
1230         tasklet_init(&rtlpriv->works.irq_tasklet,
1231                      (void (*)(unsigned long))_rtl_pci_irq_tasklet,
1232                      (unsigned long)hw);
1233         tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1234                      (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1235                      (unsigned long)hw);
1236 }
1237
1238 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1239                                  unsigned int prio, unsigned int entries)
1240 {
1241         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1242         struct rtl_priv *rtlpriv = rtl_priv(hw);
1243         struct rtl_tx_buffer_desc *buffer_desc;
1244         struct rtl_tx_desc *desc;
1245         dma_addr_t buffer_desc_dma, desc_dma;
1246         u32 nextdescaddress;
1247         int i;
1248
1249         /* alloc tx buffer desc for new trx flow*/
1250         if (rtlpriv->use_new_trx_flow) {
1251                 buffer_desc =
1252                         pci_zalloc_consistent(rtlpci->pdev,
1253                                               sizeof(*buffer_desc) * entries,
1254                                               &buffer_desc_dma);
1255
1256                 if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
1257                         RT_TRACE(COMP_ERR, DBG_EMERG,
1258                                  ("Cannot allocate TX ring (prio = %d)\n",
1259                                  prio));
1260                         return -ENOMEM;
1261                 }
1262
1263                 rtlpci->tx_ring[prio].buffer_desc = buffer_desc;
1264                 rtlpci->tx_ring[prio].buffer_desc_dma = buffer_desc_dma;
1265
1266                 rtlpci->tx_ring[prio].cur_tx_rp = 0;
1267                 rtlpci->tx_ring[prio].cur_tx_wp = 0;
1268                 rtlpci->tx_ring[prio].avl_desc = entries;
1269
1270         }
1271
1272         /* alloc dma for this ring */
1273         desc = pci_zalloc_consistent(rtlpci->pdev, sizeof(*desc) * entries,
1274                                      &desc_dma);
1275
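        /* The low 8 address bits must be zero: reject a descriptor
         * ring that is not 256-byte aligned. */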
1276         if (!desc || (unsigned long)desc & 0xFF) {
1277                 RT_TRACE(COMP_ERR, DBG_EMERG,
1278                          ("Cannot allocate TX ring (prio = %d)\n", prio));
1279                 return -ENOMEM;
1280         }
1281
1282         rtlpci->tx_ring[prio].desc = desc;
1283         rtlpci->tx_ring[prio].dma = desc_dma;
1284
1285         rtlpci->tx_ring[prio].idx = 0;
1286         rtlpci->tx_ring[prio].entries = entries;
1287         skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1288         RT_TRACE(COMP_INIT, DBG_LOUD,
1289                  ("queue:%d, ring_addr:%p\n", prio, desc));
1290
1291         /* init every desc in this ring */
1292         if (rtlpriv->use_new_trx_flow == false) {
1293                 for (i = 0; i < entries; i++) {
1294                         nextdescaddress = cpu_to_le32((u32) desc_dma +
1295                                                       ((i +     1) % entries) *
1296                                                       sizeof(*desc));
1297
1298                         rtlpriv->cfg->ops->set_desc(hw, (u8 *) & (desc[i]),
1299                                                     true,
1300                                                     HW_DESC_TX_NEXTDESC_ADDR,
1301                                                     (u8 *) & nextdescaddress);
1302                 }
1303         }
1304         return 0;
1305 }
1306
1307 static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
1308 {
1309         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1310         struct rtl_priv *rtlpriv = rtl_priv(hw);
1311
1312         int i;
1313
1314         if (rtlpriv->use_new_trx_flow) {
1315                 struct rtl_rx_buffer_desc *entry = NULL;
1316                 /* alloc dma for this ring */
1317                 rtlpci->rx_ring[rxring_idx].buffer_desc =
1318                         pci_zalloc_consistent(rtlpci->pdev,
1319                                               sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) * rtlpci->rxringcount,
1320                                               &rtlpci->rx_ring[rxring_idx].dma);
1321                 if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
1322                     (unsigned long)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
1323                         RT_TRACE(COMP_ERR, DBG_EMERG, ("Cannot allocate RX ring\n"));
1324                         return -ENOMEM;
1325                 }
1326
1327                 /* init every desc in this ring */
1328                 rtlpci->rx_ring[rxring_idx].idx = 0;
1329                 for (i = 0; i < rtlpci->rxringcount; i++) {
1330                         entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
1331                         if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
1332                                                       rxring_idx, i))
1333                                 return -ENOMEM;
1334                 }
1335         } else {
1336                 struct rtl_rx_desc *entry = NULL;
1337                 u8 tmp_one = 1;
1338                 /* alloc dma for this ring */
1339                 rtlpci->rx_ring[rxring_idx].desc =
1340                         pci_zalloc_consistent(rtlpci->pdev,
1341                                               sizeof(*rtlpci->rx_ring[rxring_idx].desc) * rtlpci->rxringcount,
1342                                               &rtlpci->rx_ring[rxring_idx].dma);
1343                 if (!rtlpci->rx_ring[rxring_idx].desc ||
1344                     (unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
1345                         RT_TRACE(COMP_ERR, DBG_EMERG,
1346                                  ("Cannot allocate RX ring\n"));
1347                         return -ENOMEM;
1348                 }
1349
1350                 /* init every desc in this ring */
1351                 rtlpci->rx_ring[rxring_idx].idx = 0;
1352                 for (i = 0; i < rtlpci->rxringcount; i++) {
1353                         entry = &rtlpci->rx_ring[rxring_idx].desc[i];
1354                         if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
1355                                                       rxring_idx, i))
1356                                 return -ENOMEM;
1357                 }
1358                 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1359                                             HW_DESC_RXERO, (u8 *)&tmp_one);
1360         }
1361         return 0;
1362 }
1363
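     /*
      * Release one TX ring: unmap and free any skbs still queued on it,
      * then free the coherent descriptor memory (the legacy desc array and,
      * for the new trx flow, the buffer descriptor array as well).
      */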
1364 static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1365                                   unsigned int prio)
1366 {
1367         struct rtl_priv *rtlpriv = rtl_priv(hw);
1368         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1369         struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1370
1371         /* free every desc in this ring */
1372         while (skb_queue_len(&ring->queue)) {
1373                 u8 *entry;
1374                 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1375                 if (rtlpriv->use_new_trx_flow)
1376                         entry = (u8 *)(&ring->buffer_desc[ring->idx]);
1377                 else
1378                         entry = (u8 *)(&ring->desc[ring->idx]);
1379
1380                 pci_unmap_single(rtlpci->pdev,
1381                                  le32_to_cpu(rtlpriv->cfg->ops->get_desc(
1382                                  (u8 *) entry, true, HW_DESC_TXBUFF_ADDR)),
1383                                  skb->len, PCI_DMA_TODEVICE);
1384                 kfree_skb(skb);
1385                 ring->idx = (ring->idx + 1) % ring->entries;
1386         }
1387
1388         /* free dma of this ring */
1389         pci_free_consistent(rtlpci->pdev,
1390                             sizeof(*ring->desc) * ring->entries,
1391                             ring->desc, ring->dma);
1392         ring->desc = NULL;
1393         if (rtlpriv->use_new_trx_flow) {
1394                 pci_free_consistent(rtlpci->pdev,
1395                                     sizeof(*ring->buffer_desc) * ring->entries,
1396                                     ring->buffer_desc, ring->buffer_desc_dma);
1397                 ring->buffer_desc = NULL;
1398         }
1399 }
1400
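     /*
      * Release one RX ring: unmap and free every receive skb still attached
      * to a descriptor, then free the coherent descriptor memory for
      * whichever descriptor format (new or legacy trx flow) is in use.
      */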
1401 static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
1402 {
1403         struct rtl_priv *rtlpriv = rtl_priv(hw);
1404         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1405         int i;
1406
1407         /* free every desc in this ring */
1408         for (i = 0; i < rtlpci->rxringcount; i++) {
1409                 struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[i];
1410                 if (!skb)
1411                         continue;
1412
1413                 pci_unmap_single(rtlpci->pdev, *((dma_addr_t *) skb->cb),
1414                                  rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
1415                 kfree_skb(skb);
1416         }
1417
1418         /* free dma of this ring */
1419         if (rtlpriv->use_new_trx_flow) {
1420                 pci_free_consistent(rtlpci->pdev,
1421                                     sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
1422                                     rtlpci->rxringcount,
1423                                     rtlpci->rx_ring[rxring_idx].buffer_desc,
1424                                     rtlpci->rx_ring[rxring_idx].dma);
1425                 rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
1426         } else {
1427                 pci_free_consistent(rtlpci->pdev,
1428                                     sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
1429                                     rtlpci->rxringcount,
1430                                     rtlpci->rx_ring[rxring_idx].desc,
1431                                     rtlpci->rx_ring[rxring_idx].dma);
1432                 rtlpci->rx_ring[rxring_idx].desc = NULL;
1433         }
1434 }
1435
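     /*
      * Set up all RX rings first, then every TX ring.  If a TX ring fails
      * to allocate, everything that was set up so far is torn down again so
      * the caller only needs to check the return value.
      */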
1436 static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1437 {
1438         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1439         int ret;
1440         int i, rxring_idx;
1441
1442         /* rxring_idx 0:RX_MPDU_QUEUE
1443          * rxring_idx 1:RX_CMD_QUEUE */
1444         for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
1445                 ret = _rtl_pci_init_rx_ring(hw, rxring_idx);
1446                 if (ret)
1447                         return ret;
1448         }
1449
1450         for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1451                 ret = _rtl_pci_init_tx_ring(hw, i, rtlpci->txringcount[i]);
1452                 if (ret)
1453                         goto err_free_rings;
1454         }
1455
1456         return 0;
1457
1458 err_free_rings:
1459         for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
1460                 _rtl_pci_free_rx_ring(hw, rxring_idx);
1461
1462         for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1463                 if (rtlpci->tx_ring[i].desc ||
1464                     rtlpci->tx_ring[i].buffer_desc)
1465                         _rtl_pci_free_tx_ring(hw, i);
1466
1467         return ret;
1468 }
1469
1470 static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1471 {
1472         u32 i, rxring_idx;
1473
1474         /*free rx rings */
1475         for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++)
1476                 _rtl_pci_free_rx_ring(hw, rxring_idx);
1477
1478         /*free tx rings */
1479         for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1480                 _rtl_pci_free_tx_ring(hw, i);
1481
1482         return 0;
1483 }
1484
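     /*
      * Reset both directions after a hardware reset: hand every legacy RX
      * descriptor back to the hardware (RXOWN) and rewind the RX index,
      * then, under irq_th_lock, unmap and drop any TX skbs that were still
      * pending and rewind each TX ring index to 0.
      */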
1485 int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1486 {
1487         struct rtl_priv *rtlpriv = rtl_priv(hw);
1488         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1489         int i, rxring_idx;
1490         unsigned long flags;
1491         u8 tmp_one = 1;
1492         /* rxring_idx 0:RX_MPDU_QUEUE */
1493         /* rxring_idx 1:RX_CMD_QUEUE */
1494         for (rxring_idx = 0; rxring_idx < RTL_PCI_MAX_RX_QUEUE; rxring_idx++) {
1495                 /* force rx_ring[RX_MPDU_QUEUE/RX_CMD_QUEUE].idx back to
1496                  * the first entry; for the legacy flow also hand every
1497                  * descriptor back to the hardware (RXOWN) */
1498                 if (!rtlpriv->use_new_trx_flow &&
1499                     rtlpci->rx_ring[rxring_idx].desc) {
1500                         struct rtl_rx_desc *entry = NULL;
1501
1502                         for (i = 0; i < rtlpci->rxringcount; i++) {
1503                                 entry = &rtlpci->rx_ring[rxring_idx].desc[i];
1504                                 rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
1505                                                             false,
1506                                                             HW_DESC_RXOWN,
1507                                                             (u8 *)&tmp_one);
1508                         }
1509                 }
1510                 rtlpci->rx_ring[rxring_idx].idx = 0;
             }
1511
1512         /* after reset, release previously pending packets,
1513          * and force the tx idx back to the first one */
1514         spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1515         for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1516                 if (rtlpci->tx_ring[i].desc ||
1517                         rtlpci->tx_ring[i].buffer_desc) {
1518                         struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1519
1520                         while (skb_queue_len(&ring->queue)) {
1521                                 u8 *entry;
1522                                 struct sk_buff *skb =
1523                                         __skb_dequeue(&ring->queue);
1524                                 if (rtlpriv->use_new_trx_flow)
1525                                         entry = (u8 *)(&ring->buffer_desc
1526                                                                 [ring->idx]);
1527                                 else
1528                                         entry = (u8 *)(&ring->desc[ring->idx]);
1529
1530                                 pci_unmap_single(rtlpci->pdev,
1531                                         le32_to_cpu(rtlpriv->cfg->ops->get_desc(
1532                                                         (u8 *)entry, true,
1533                                                         HW_DESC_TXBUFF_ADDR)),
1534                                         skb->len, PCI_DMA_TODEVICE);
1535                                 kfree_skb(skb);
1536                                 ring->idx = (ring->idx + 1) % ring->entries;
1537                         }
1538                         ring->idx = 0;
1539                 }
1540         }
1541
1542         spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1543
1544         return 0;
1545 }
1546
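     /*
      * Early-mode TX staging: when early mode is enabled and TX traffic on
      * this TID is busy, QoS data frames belonging to an operational
      * aggregation session are parked on the per-TID skb_waitq instead of
      * being handed to the DMA ring right away.  Returns true if the frame
      * was queued here, so the caller is expected not to send it directly.
      */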
1547 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1548                                         struct ieee80211_sta *sta,
1549                                         struct sk_buff *skb)
1550 {
1551         struct rtl_priv *rtlpriv = rtl_priv(hw);
1552         struct rtl_sta_info *sta_entry = NULL;
1553         u8 tid = rtl_get_tid(skb);
1554         u16 fc = rtl_get_fc(skb);
1555
1556         if (!sta)
1557                 return false;
1558         sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1559
1560         if (!rtlpriv->rtlhal.b_earlymode_enable)
1561                 return false;
1562         if (ieee80211_is_nullfunc(fc))
1563                 return false;
1564         if (ieee80211_is_qos_nullfunc(fc))
1565                 return false;
1566         if (ieee80211_is_pspoll(fc))
1567                 return false;
1569
1570         if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1571                 return false;
1572         if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1573                 return false;
1574         if (tid > 7)
1575                 return false;
1576         /* maybe every tid should be checked */
1577         if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1578                 return false;
1579
1580         spin_lock_bh(&rtlpriv->locks.waitq_lock);
1581         skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1582         spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1583
1584         return true;
1585 }
1586
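     /*
      * Main TX path: pick the hardware queue for the frame, take the next
      * free descriptor under irq_th_lock (for the legacy flow the OWN bit
      * is checked first), let the HAL fill the descriptor, queue the skb on
      * the ring and hand ownership back to the hardware.  When fewer than
      * two descriptors remain, the corresponding mac80211 queue is stopped,
      * and tx_polling() finally notifies the hardware.
      */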
1587 static int rtl_pci_tx(struct ieee80211_hw *hw,
1588                       struct ieee80211_sta *sta,
1589                       struct sk_buff *skb,
1590                       struct rtl_tcb_desc *ptcb_desc)
1591 {
1592         struct rtl_priv *rtlpriv = rtl_priv(hw);
1593         struct rtl_sta_info *sta_entry = NULL;
1594         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1595         struct rtl8192_tx_ring *ring;
1596         struct rtl_tx_desc *pdesc;
1597         struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
1598         u16 idx;
1599         u8 own;
1600         u8 temp_one = 1;
1601         u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1602         unsigned long flags;
1603         struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
1604         u16 fc = rtl_get_fc(skb);
1605         u8 *pda_addr = hdr->addr1;
1606         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1607         /*ssn */
1608         u8 tid = 0;
1609         u16 seq_number = 0;
1610
1612         if (ieee80211_is_mgmt(fc))
1613                 rtl_tx_mgmt_proc(hw, skb);
1614
1615         if (rtlpriv->psc.sw_ps_enabled) {
1616                 if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
1617                     !ieee80211_has_pm(fc))
1618                         hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
1619         }
1620
1621         rtl_action_proc(hw, skb, true);
1622
1623         if (is_multicast_ether_addr(pda_addr))
1624                 rtlpriv->stats.txbytesmulticast += skb->len;
1625         else if (is_broadcast_ether_addr(pda_addr))
1626                 rtlpriv->stats.txbytesbroadcast += skb->len;
1627         else
1628                 rtlpriv->stats.txbytesunicast += skb->len;
1629
1630         spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1631         ring = &rtlpci->tx_ring[hw_queue];
1632         if (hw_queue != BEACON_QUEUE) {
1633                 if (rtlpriv->use_new_trx_flow)
1634                         idx = ring->cur_tx_wp;
1635                 else
1636                         idx = (ring->idx + skb_queue_len(&ring->queue)) %
1637                               ring->entries;
1638         } else {
1639                 idx = 0;
1640         }
1641
1642         pdesc = &ring->desc[idx];
1643
1644         if (rtlpriv->use_new_trx_flow) {
1645                 ptx_bd_desc = &ring->buffer_desc[idx];
1646         } else {
1647                 own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
1648                                                       true, HW_DESC_OWN);
1649
1650                 if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1651                         RT_TRACE(COMP_ERR, DBG_WARNING,
1652                                  ("No more TX desc@%d, ring->idx = %d, "
1653                                   "idx = %d, skb_queue_len = %d\n",
1654                                   hw_queue, ring->idx, idx,
1655                                   skb_queue_len(&ring->queue)));
1656
1657                         spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1658                                                flags);
1659                         return skb->len;
1660                 }
1661         }
1662
1663         if (ieee80211_is_data_qos(fc)) {
1664                 tid = rtl_get_tid(skb);
1665                 if (sta) {
1666                         sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1667                         seq_number = (le16_to_cpu(hdr->seq_ctrl) &
1668                                       IEEE80211_SCTL_SEQ) >> 4;
1669                         seq_number += 1;
1670
1671                         if (!ieee80211_has_morefrags(hdr->frame_control))
1672                                 sta_entry->tids[tid].seq_number = seq_number;
1673                 }
1674         }
1675
1676         if (ieee80211_is_data(fc))
1677                 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1678
1679         rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1680                                         (u8 *)ptx_bd_desc, info, sta, skb,
1681                                         hw_queue, ptcb_desc);
1682
1683         __skb_queue_tail(&ring->queue, skb);
1684         if (rtlpriv->use_new_trx_flow) {
1685                 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1686                                             HW_DESC_OWN, (u8 *)&hw_queue);
1687         } else {
1688                 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1689                                             HW_DESC_OWN, (u8 *)&temp_one);
1690         }
1691
1692         if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1693             hw_queue != BEACON_QUEUE) {
1695                 RT_TRACE(COMP_ERR, DBG_LOUD,
1696                          ("less desc left, stop skb_queue@%d, "
1697                           "ring->idx = %d, idx = %d, skb_queue_len = %d\n",
1698                           hw_queue, ring->idx, idx,
1699                           skb_queue_len(&ring->queue)));
1701
1702                 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1703         }
1704
1705         spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1706
1707         rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
1708
1709         return 0;
1710 }
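
     /*
      * Wait for the requested TX queues to drain; the beacon and TX command
      * queues are skipped, and nothing is done while mac->skip_scan is set.
      * Polls in 5 ms steps and gives up after roughly one second, or
      * immediately if the RF is off or the HAL has been stopped.
      */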
1711 static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1712 {
1713         struct rtl_priv *rtlpriv = rtl_priv(hw);
1714         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1715         struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1716         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1717         u16 i = 0;
1718         int queue_id;
1719         struct rtl8192_tx_ring *ring;
1720
1721         if (mac->skip_scan)
1722                 return;
1723
1724         for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1725                 u32 queue_len;
1726                 if (((queues >> queue_id) & 0x1) == 0) {
1727                         queue_id--;
1728                         continue;
1729                 }
1730                 ring = &pcipriv->dev.tx_ring[queue_id];
1731                 queue_len = skb_queue_len(&ring->queue);
1732                 if (queue_len == 0 || queue_id == BEACON_QUEUE ||
1733                         queue_id == TXCMD_QUEUE) {
1734                         queue_id--;
1735                         continue;
1736                 } else {
1737                         msleep(5);
1738                         i++;
1739                 }
1740
1741                 /* we just wait 1s for all queues */
1742                 if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1743                         is_hal_stop(rtlhal) || i >= 200)
1744                         return;
1745         }
1746 }
1747
1748 void rtl_pci_deinit(struct ieee80211_hw *hw)
1749 {
1750         struct rtl_priv *rtlpriv = rtl_priv(hw);
1751         struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1752
1753         _rtl_pci_deinit_trx_ring(hw);
1754
1755         synchronize_irq(rtlpci->pdev->irq);
1756         tasklet_kill(&rtlpriv->works.irq_tasklet);
1757
1758         flush_workqueue(rtlpriv->works.rtl_wq);
1759         destroy_workqueue(rtlpriv->works.rtl_wq);
1761 }
1762
1763 int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1764 {
1765         struct rtl_priv *rtlpriv = rtl_priv(hw);
1766         int err;
1767
1768         _rtl_pci_init_struct(hw, pdev);
1769
1770         err = _rtl_pci_init_trx_ring(hw);
1771         if (err) {
1772                 RT_TRACE(COMP_ERR, DBG_EMERG,
1773                          ("tx/rx ring initialization failed\n"));
1774                 return err;
1775         }
1776
1777         return 0;
1778 }
1779
1780 int rtl_pci_start(struct ieee80211_hw *hw)
1781 {
1782         struct rtl_priv *rtlpriv = rtl_priv(hw);
1783         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1784         struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1785
1786         int err = 0;
1787         RT_TRACE(COMP_INIT, DBG_DMESG, (" rtl_pci_start \n"));
1788         rtl_pci_reset_trx_ring(hw);
1789
1790         rtlpriv->rtlhal.driver_is_goingto_unload = false;
1791         err = rtlpriv->cfg->ops->hw_init(hw);
1792         if (err) {
1793                 RT_TRACE(COMP_INIT, DBG_DMESG,
1794                          ("Failed to config hardware err %x!\n", err));
1795                 return err;
1796         }
1797
1798         rtlpriv->cfg->ops->enable_interrupt(hw);
1799         RT_TRACE(COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
1800
1801         rtl_init_rx_config(hw);
1802
1803         /* should be done after adapter start and interrupt enable */
1804         set_hal_start(rtlhal);
1805
1806         RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1807
1808         rtlpriv->rtlhal.up_first_time = false;
1809
1810         RT_TRACE(COMP_INIT, DBG_DMESG, ("rtl_pci_start OK\n"));
1811         return 0;
1812 }
1813
1814 void rtl_pci_stop(struct ieee80211_hw *hw)
1815 {
1816         struct rtl_priv *rtlpriv = rtl_priv(hw);
1817         struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1818         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1819         u8 RFInProgressTimeOut = 0;
1820
1821         /*
1822          * should be done before disabling the interrupt and the adapter,
1823          * and takes effect immediately
1824          */
1825         set_hal_stop(rtlhal);
1826
1827         rtlpriv->cfg->ops->disable_interrupt(hw);
1828
1829         spin_lock(&rtlpriv->locks.rf_ps_lock);
1830         while (ppsc->rfchange_inprogress) {
1831                 spin_unlock(&rtlpriv->locks.rf_ps_lock);
1832                 if (RFInProgressTimeOut > 100) {
1833                         spin_lock(&rtlpriv->locks.rf_ps_lock);
1834                         break;
1835                 }
1836                 mdelay(1);
1837                 RFInProgressTimeOut++;
1838                 spin_lock(&rtlpriv->locks.rf_ps_lock);
1839         }
1840         ppsc->rfchange_inprogress = true;
1841         spin_unlock(&rtlpriv->locks.rf_ps_lock);
1842
1843         rtlpriv->rtlhal.driver_is_goingto_unload = true;
1844         rtlpriv->cfg->ops->hw_disable(hw);
1845         rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1846
1847         spin_lock(&rtlpriv->locks.rf_ps_lock);
1848         ppsc->rfchange_inprogress = false;
1849         spin_unlock(&rtlpriv->locks.rf_ps_lock);
1850
1851         rtl_pci_enable_aspm(hw);
1852 }
1853
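     /*
      * Identify the adapter from its PCI vendor/device/revision IDs and set
      * rtlhal->hw_type accordingly (the unsupported 8192E makes this bail
      * out with false).  Also selects the new trx flow for the 8192EE and
      * records the bus and PCI bridge topology used later by the ASPM code.
      */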
1854 static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1855                                   struct ieee80211_hw *hw)
1856 {
1857         struct rtl_priv *rtlpriv = rtl_priv(hw);
1858         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1859         struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1860         struct pci_dev *bridge_pdev = pdev->bus->self;
1861         u16 venderid;
1862         u16 deviceid;
1863         u8 revisionid;
1864         u16 irqline;
1865         u8 tmp;
1866
1867         venderid = pdev->vendor;
1868         deviceid = pdev->device;
1869         pci_read_config_byte(pdev, 0x8, &revisionid);
1870         pci_read_config_word(pdev, 0x3C, &irqline);
1871
1872         if (deviceid == RTL_PCI_8192_DID ||
1873             deviceid == RTL_PCI_0044_DID ||
1874             deviceid == RTL_PCI_0047_DID ||
1875             deviceid == RTL_PCI_8192SE_DID ||
1876             deviceid == RTL_PCI_8174_DID ||
1877             deviceid == RTL_PCI_8173_DID ||
1878             deviceid == RTL_PCI_8172_DID ||
1879             deviceid == RTL_PCI_8171_DID) {
1880                 switch (revisionid) {
1881                 case RTL_PCI_REVISION_ID_8192PCIE:
1882                         RT_TRACE(COMP_INIT, DBG_DMESG,
1883                                  ("8192E is found but not supported now - "
1884                                   "vid/did=%x/%x\n", venderid, deviceid));
1885                         rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1886                         return false;
1888                 case RTL_PCI_REVISION_ID_8192SE:
1889                         RT_TRACE(COMP_INIT, DBG_DMESG,
1890                                  ("8192SE is found - "
1891                                   "vid/did=%x/%x\n", venderid, deviceid));
1892                         rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1893                         break;
1894                 default:
1895                         RT_TRACE(COMP_ERR, DBG_WARNING,
1896                                  ("Err: Unknown device - "
1897                                   "vid/did=%x/%x\n", venderid, deviceid));
1898                         rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1899                         break;
1901                 }
1902         } else if (deviceid == RTL_PCI_8723AE_DID) {
1903                 rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
1904                 RT_TRACE(COMP_INIT, DBG_DMESG,
1905                          ("8723AE PCI-E is found - "
1906                           "vid/did=%x/%x\n", venderid, deviceid));
1907         } else if (deviceid == RTL_PCI_8192CET_DID ||
1908                    deviceid == RTL_PCI_8192CE_DID ||
1909                    deviceid == RTL_PCI_8191CE_DID ||
1910                    deviceid == RTL_PCI_8188CE_DID) {
1911                 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1912                 RT_TRACE(COMP_INIT, DBG_DMESG,
1913                          ("8192C PCI-E is found - "
1914                           "vid/did=%x/%x\n", venderid, deviceid));
1915         } else if (deviceid == RTL_PCI_8192DE_DID ||
1916                    deviceid == RTL_PCI_8192DE_DID2) {
1917                 rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1918                 RT_TRACE(COMP_INIT, DBG_DMESG,
1919                          ("8192D PCI-E is found - "
1920                           "vid/did=%x/%x\n", venderid, deviceid));
1921         } else if (deviceid == RTL_PCI_8188EE_DID) {
1922                 rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
1923                 RT_TRACE(COMP_INIT, DBG_LOUD,
1924                          ("Find adapter, Hardware type is 8188EE\n"));
1925         } else if (deviceid == RTL_PCI_8723BE_DID) {
1926                 rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
1927                 RT_TRACE(COMP_INIT, DBG_LOUD,
1928                          ("Find adapter, Hardware type is 8723BE\n"));
1929         } else if (deviceid == RTL_PCI_8192EE_DID) {
1930                 rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
1931                 RT_TRACE(COMP_INIT, DBG_LOUD,
1932                          ("Find adapter, Hardware type is 8192EE\n"));
1933         } else if (deviceid == RTL_PCI_8821AE_DID) {
1934                 rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
1935                 RT_TRACE(COMP_INIT, DBG_LOUD,
1936                          ("Find adapter, Hardware type is 8821AE\n"));
1937         } else if (deviceid == RTL_PCI_8812AE_DID) {
1938                 rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
1939                 RT_TRACE(COMP_INIT, DBG_LOUD,
1940                          ("Find adapter, Hardware type is 8812AE\n"));
1941         } else {
1942                 RT_TRACE(COMP_ERR, DBG_WARNING,
1943                          ("Err: Unknown device - "
1944                           "vid/did=%x/%x\n", venderid, deviceid));
1945
1946                 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
1947         }
1948
1949         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1950                 if (revisionid == 0 || revisionid == 1) {
1951                         if (revisionid == 0) {
1952                                 RT_TRACE(COMP_INIT, DBG_LOUD,
1953                                          ("Find 92DE MAC0.\n"));
1954                                 rtlhal->interfaceindex = 0;
1955                         } else if (revisionid == 1) {
1956                                 RT_TRACE(COMP_INIT, DBG_LOUD,
1957                                          ("Find 92DE MAC1.\n"));
1958                                 rtlhal->interfaceindex = 1;
1959                         }
1960                 } else {
1961                         RT_TRACE(COMP_INIT, DBG_LOUD, ("Unknown device - "
1962                                  "VendorID/DeviceID=%x/%x, Revision=%x\n",
1963                                  venderid, deviceid, revisionid));
1964                         rtlhal->interfaceindex = 0;
1965                 }
1966         }
1967
1968         /* the 8192EE uses the new trx flow */
1969         if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192EE)
1970                 rtlpriv->use_new_trx_flow = true;
1971         else
1972                 rtlpriv->use_new_trx_flow = false;
1973
1974         /*find bus info */
1975         pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1976         pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1977         pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1978
1979         /*find bridge info */
1980         pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
1981         /* some ARM platforms have no bridge_pdev and would crash here,
1982          * so check that bridge_pdev is not NULL */
1983         if (bridge_pdev) {
1984                 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1985                 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1986                         if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1987                                 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1988                                 RT_TRACE(COMP_INIT, DBG_DMESG,
1989                                          ("Pci Bridge Vendor is found index: %d\n",
1990                                           tmp));
1991                                 break;
1992                         }
1993                 }
1994         }
1995
1996         if (pcipriv->ndis_adapter.pcibridge_vendor !=
1997             PCI_BRIDGE_VENDOR_UNKNOWN) {
1998                 pcipriv->ndis_adapter.pcibridge_busnum =
1999                     bridge_pdev->bus->number;
2000                 pcipriv->ndis_adapter.pcibridge_devnum =
2001                     PCI_SLOT(bridge_pdev->devfn);
2002                 pcipriv->ndis_adapter.pcibridge_funcnum =
2003                     PCI_FUNC(bridge_pdev->devfn);
2004                 pcipriv->ndis_adapter.pcicfg_addrport =
2005                     (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
2006                     (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
2007                     (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1U << 31);
2008                 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
2009                     pci_pcie_cap(bridge_pdev);
2010                 pcipriv->ndis_adapter.num4bytes =
2011                     (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
2012
2013                 rtl_pci_get_linkcontrol_field(hw);
2014
2015                 if (pcipriv->ndis_adapter.pcibridge_vendor ==
2016                     PCI_BRIDGE_VENDOR_AMD) {
2017                         pcipriv->ndis_adapter.amd_l1_patch =
2018                             rtl_pci_get_amd_l1_patch(hw);
2019                 }
2020         }
2021
2022         RT_TRACE(COMP_INIT, DBG_DMESG,
2023                  ("pcidev busnumber:devnumber:funcnumber:"
2024                   "vendor:link_ctl %d:%d:%d:%x:%x\n",
2025                   pcipriv->ndis_adapter.busnumber,
2026                   pcipriv->ndis_adapter.devnumber,
2027                   pcipriv->ndis_adapter.funcnumber,
2028                   pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
2029
2030         RT_TRACE(COMP_INIT, DBG_DMESG,
2031                  ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
2032                   "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
2033                   pcipriv->ndis_adapter.pcibridge_busnum,
2034                   pcipriv->ndis_adapter.pcibridge_devnum,
2035                   pcipriv->ndis_adapter.pcibridge_funcnum,
2036                   pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
2037                   pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
2038                   pcipriv->ndis_adapter.pcibridge_linkctrlreg,
2039                   pcipriv->ndis_adapter.amd_l1_patch));
2040
2041         rtl_pci_parse_configuration(pdev, hw);
2042         list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
2043         return true;
2044 }
2045
2046 static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
2047 {
2048         struct rtl_priv *rtlpriv = rtl_priv(hw);
2049         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2050         struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2051         int ret;
2052         ret = pci_enable_msi(rtlpci->pdev);
2053         if (ret < 0)
2054                 return ret;
2055
2056         ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
2057                           IRQF_SHARED, KBUILD_MODNAME, hw);
2058         if (ret < 0) {
2059                 pci_disable_msi(rtlpci->pdev);
2060                 return ret;
2061         }
2062
2063         rtlpci->using_msi = true;
2064
2065         RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG, ("MSI Interrupt Mode!\n"));
2066         return 0;
2067 }
2068
2069 static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
2070 {
2071         struct rtl_priv *rtlpriv = rtl_priv(hw);
2072         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2073         struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2074         int ret;
2075
2076         ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
2077                           IRQF_SHARED, KBUILD_MODNAME, hw);
2078         if (ret < 0)
2079                 return ret;
2081
2082         rtlpci->using_msi = false;
2083         RT_TRACE(COMP_INIT|COMP_INTR, DBG_DMESG,
2084                  ("Pin-based Interrupt Mode!\n"));
2085         return 0;
2086 }
2087
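     /*
      * Prefer MSI when the chip configuration asks for it, but fall back to
      * legacy (pin-based) interrupts if MSI cannot be enabled or the IRQ
      * cannot be requested.
      */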
2088 static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
2089 {
2090         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2091         struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2092         int ret;
2093         if (rtlpci->msi_support) {
2094                 ret = rtl_pci_intr_mode_msi(hw);
2095                 if (ret < 0)
2096                         ret = rtl_pci_intr_mode_legacy(hw);
2097         } else {
2098                 ret = rtl_pci_intr_mode_legacy(hw);
2099         }
2100         return ret;
2101 }
2102
2103 /* this is used by other modules to get the hw pointer
2104  * via rtl_pci_get_hw_pointer() */
2105 struct ieee80211_hw *hw_export = NULL;
2106
2107 int rtl_pci_probe(struct pci_dev *pdev,
2108                   const struct pci_device_id *id)
2109 {
2110         struct ieee80211_hw *hw = NULL;
2111
2112         struct rtl_priv *rtlpriv = NULL;
2113         struct rtl_pci_priv *pcipriv = NULL;
2114         struct rtl_pci *rtlpci;
2115         unsigned long pmem_start, pmem_len, pmem_flags;
2116         int err;
2117
2119         err = pci_enable_device(pdev);
2120         if (err) {
2121                 RT_ASSERT(false,
2122                           ("%s : Cannot enable new PCI device\n",
2123                            pci_name(pdev)));
2124                 return err;
2125         }
2126
2127         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
2128                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2129                         RT_ASSERT(false, ("Unable to obtain 32bit DMA "
2130                                           "for consistent allocations\n"));
2131                         pci_disable_device(pdev);
2132                         return -ENOMEM;
2133                 }
2134         }
2135
2136         pci_set_master(pdev);
2137
2138         hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
2139                                 sizeof(struct rtl_priv), &rtl_ops);
2140         if (!hw) {
2141                 RT_ASSERT(false,
2142                           ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
2143                 err = -ENOMEM;
2144                 goto fail1;
2145         }
2146         hw_export = hw;
2147
2148         SET_IEEE80211_DEV(hw, &pdev->dev);
2149         pci_set_drvdata(pdev, hw);
2150
2151         rtlpriv = hw->priv;
2152         pcipriv = (void *)rtlpriv->priv;
2153         pcipriv->dev.pdev = pdev;
2154
2155         /* init cfg & intf_ops */
2156         rtlpriv->rtlhal.interface = INTF_PCI;
2157         rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
2158         rtlpriv->intf_ops = &rtl_pci_ops;
2159         rtlpriv->glb_var = &global_var;
2160
2161         /*
2162          * init dbgp flags before all other functions, because macros
2163          * like RT_TRACE/RT_PRINT/RTL_PRINT_DATA rely on them and
2164          * cannot be used before this point
2165          */
2169         rtl_dbgp_flag_init(hw);
2170
2171         /* MEM map */
2172         err = pci_request_regions(pdev, KBUILD_MODNAME);
2173         if (err) {
2174                 RT_ASSERT(false, ("Can't obtain PCI resources\n"));
2175                 goto fail1;
2176         }
2177
2178         pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
2179         pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
2180         pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
2181
2182         /*shared mem start */
2183         rtlpriv->io.pci_mem_start =
2184                         pci_iomap(pdev, rtlpriv->cfg->bar_id, pmem_len);
2186         if (rtlpriv->io.pci_mem_start == NULL) {
2187                 RT_ASSERT(false, ("Can't map PCI mem\n"));
2188                 goto fail2;
2189         }
2190
2191         RT_TRACE(COMP_INIT, DBG_DMESG,
2192                  ("mem mapped space: start: 0x%08lx len:%08lx "
2193                   "flags:%08lx, after map:0x%p\n",
2194                   pmem_start, pmem_len, pmem_flags,
2195                   rtlpriv->io.pci_mem_start));
2196
2197         /* Disable Clk Request */
2198         pci_write_config_byte(pdev, 0x81, 0);
2199         /* leave D3 mode */
2200         pci_write_config_byte(pdev, 0x44, 0);
2201         pci_write_config_byte(pdev, 0x04, 0x06);
2202         pci_write_config_byte(pdev, 0x04, 0x07);
2203
2204         /* find adapter; returns false if the chip is not supported */
2205         if (!_rtl_pci_find_adapter(pdev, hw))
2206                 goto fail3;
2208
2209         /* Init IO handler */
2210         _rtl_pci_io_handler_init(&pdev->dev, hw);
2211
2212         /*like read eeprom and so on */
2213         rtlpriv->cfg->ops->read_eeprom_info(hw);
2214
2215         if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2216                 RT_TRACE(COMP_ERR, DBG_EMERG, ("Can't init_sw_vars.\n"));
2217                 goto fail3;
2218         }
2219
2220         rtlpriv->cfg->ops->init_sw_leds(hw);
2221
2222         /*aspm */
2223         rtl_pci_init_aspm(hw);
2224
2225         /* Init mac80211 sw */
2226         err = rtl_init_core(hw);
2227         if (err) {
2228                 RT_TRACE(COMP_ERR, DBG_EMERG,
2229                          ("Can't allocate sw for mac80211.\n"));
2230                 goto fail3;
2231         }
2232
2233         /* Init PCI sw */
2234         err = rtl_pci_init(hw, pdev);
2235         if (err) {
2236                 RT_TRACE(COMP_ERR, DBG_EMERG, ("Failed to init PCI.\n"));
2237                 goto fail3;
2238         }
2239
2240         err = ieee80211_register_hw(hw);
2241         if (err) {
2242                 RT_TRACE(COMP_ERR, DBG_EMERG,
2243                          ("Can't register mac80211 hw.\n"));
2244                 goto fail3;
2245         } else {
2246                 rtlpriv->mac80211.mac80211_registered = 1;
2247         }
2248         /* the wiphy must have been registered with
2249          * cfg80211 prior to regulatory_hint */
2250         if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2))
2251                 RT_TRACE(COMP_ERR, DBG_WARNING, ("regulatory_hint fail\n"));
2253
2254         err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
2255         if (err) {
2256                 RT_TRACE(COMP_ERR, DBG_EMERG,
2257                          ("failed to create sysfs device attributes\n"));
2258                 goto fail3;
2259         }
2260         /* add proc entries */
2261         rtl_proc_add_one(hw);
2262
2263         /*init rfkill */
2264         rtl_init_rfkill(hw);
2265
2266         rtlpci = rtl_pcidev(pcipriv);
2267
2268         err = rtl_pci_intr_mode_decide(hw);
2269         if (err) {
2270                 RT_TRACE(COMP_INIT, DBG_DMESG,
2271                          ("%s: failed to register IRQ handler\n",
2272                           wiphy_name(hw->wiphy)));
2273                 goto fail3;
2274         } else {
2275                 rtlpci->irq_alloc = 1;
2276         }
2277
2278         set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
2279         return 0;
2280
2281 fail3:
2282         pci_set_drvdata(pdev, NULL);
2283         rtl_deinit_core(hw);
2284
2285         if (rtlpriv->io.pci_mem_start != NULL)
2286                 pci_iounmap(pdev, rtlpriv->io.pci_mem_start);
2287
2288 fail2:
2289         pci_release_regions(pdev);
2290
2291 fail1:
2292         if (hw)
2293                 ieee80211_free_hw(hw);
2294         pci_disable_device(pdev);
2295
2296         return -ENODEV;
2297
2298 }
2299 /* EXPORT_SYMBOL(rtl_pci_probe); */
2300
2301 struct ieee80211_hw *rtl_pci_get_hw_pointer(void)
2302 {
2303         return hw_export;
2304 }
2305 /* EXPORT_SYMBOL(rtl_pci_get_hw_pointer); */
2306
2307 void rtl_pci_disconnect(struct pci_dev *pdev)
2308 {
2309         struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2310         struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2311         struct rtl_priv *rtlpriv = rtl_priv(hw);
2312         struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
2313         struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
2314
2315         clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
2316
2317         sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
2318
2319         /* remove proc entries */
2320         rtl_proc_remove_one(hw);
2321
2322
2323         /*ieee80211_unregister_hw will call ops_stop */
2324         if (rtlmac->mac80211_registered == 1) {
2325                 ieee80211_unregister_hw(hw);
2326                 rtlmac->mac80211_registered = 0;
2327         } else {
2328                 rtl_deinit_deferred_work(hw);
2329                 rtlpriv->intf_ops->adapter_stop(hw);
2330         }
2331
2332         /*deinit rfkill */
2333         rtl_deinit_rfkill(hw);
2334
2335         rtl_pci_deinit(hw);
2336         rtl_deinit_core(hw);
2337         rtlpriv->cfg->ops->deinit_sw_vars(hw);
2338
2339         if (rtlpci->irq_alloc) {
2340                 synchronize_irq(rtlpci->pdev->irq);
2341                 free_irq(rtlpci->pdev->irq, hw);
2342                 rtlpci->irq_alloc = 0;
2343         }
2344
2345         if (rtlpci->using_msi)
2346                 pci_disable_msi(rtlpci->pdev);
2347
2348         list_del(&rtlpriv->list);
2349         if (rtlpriv->io.pci_mem_start != NULL) {
2350                 pci_iounmap(pdev, rtlpriv->io.pci_mem_start);
2351                 pci_release_regions(pdev);
2352         }
2353
2354         pci_disable_device(pdev);
2355
2356         rtl_pci_disable_aspm(hw);
2357
2358         pci_set_drvdata(pdev, NULL);
2359
2360         ieee80211_free_hw(hw);
2361 }
2362 /* EXPORT_SYMBOL(rtl_pci_disconnect); */
2363
2364 /***************************************
2365 kernel pci power state define:
2366 PCI_D0         ((pci_power_t __force) 0)
2367 PCI_D1         ((pci_power_t __force) 1)
2368 PCI_D2         ((pci_power_t __force) 2)
2369 PCI_D3hot      ((pci_power_t __force) 3)
2370 PCI_D3cold     ((pci_power_t __force) 4)
2371 PCI_UNKNOWN    ((pci_power_t __force) 5)
2372
2373 This function is called when the system
2374 goes into the suspend state. mac80211 will
2375 call rtl_mac_stop() from its own suspend
2376 path first, so there is no need to
2377 call hw_disable here.
2378 ****************************************/
2379 int rtl_pci_suspend(struct device *dev)
2380 {
2381         struct pci_dev *pdev = to_pci_dev(dev);
2382         struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2383         struct rtl_priv *rtlpriv = rtl_priv(hw);
2384
2385         rtlpriv->cfg->ops->hw_suspend(hw);
2386         rtl_deinit_rfkill(hw);
2387
2388         return 0;
2389 }
2390 /* EXPORT_SYMBOL(rtl_pci_suspend); */
2391
2392 int rtl_pci_resume(struct device *dev)
2393 {
2394         struct pci_dev *pdev = to_pci_dev(dev);
2395         struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2396         struct rtl_priv *rtlpriv = rtl_priv(hw);
2397
2398         rtlpriv->cfg->ops->hw_resume(hw);
2399         rtl_init_rfkill(hw);
2400
2401         return 0;
2402 }
2403 /* EXPORT_SYMBOL(rtl_pci_resume); */
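     /*
      * Note: these suspend/resume handlers are not registered here; the
      * chip-specific module is expected to hook them into its pci_driver.
      * A minimal sketch of such wiring (illustrative only - the actual ops
      * name and driver struct live in that module):
      *
      *	static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend,
      *				 rtl_pci_resume);
      *
      *	static struct pci_driver rtl8821ae_driver = {
      *		...
      *		.driver.pm = &rtlwifi_pm_ops,
      *	};
      */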
2404
2405 struct rtl_intf_ops rtl_pci_ops = {
2406         .read_efuse_byte = read_efuse_byte,
2407         .adapter_start = rtl_pci_start,
2408         .adapter_stop = rtl_pci_stop,
2409         .check_buddy_priv = rtl_pci_check_buddy_priv,
2410         .adapter_tx = rtl_pci_tx,
2411         .flush = rtl_pci_flush,
2412         .reset_trx_ring = rtl_pci_reset_trx_ring,
2413         .waitq_insert = rtl_pci_tx_chk_waitq_insert,
2414
2415         .disable_aspm = rtl_pci_disable_aspm,
2416         .enable_aspm = rtl_pci_enable_aspm,
2417 };