2 * ahci.c - AHCI SATA support
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2004-2005 Red Hat, Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
49 #define DRV_NAME "ahci"
50 #define DRV_VERSION "3.0"
52 /* Enclosure Management Control */
53 #define EM_CTRL_MSG_TYPE 0x000f0000
55 /* Enclosure Management LED Message Type */
56 #define EM_MSG_LED_HBA_PORT 0x0000000f
57 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
58 #define EM_MSG_LED_VALUE 0xffff0000
59 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
60 #define EM_MSG_LED_VALUE_OFF 0xfff80000
61 #define EM_MSG_LED_VALUE_ON 0x00010000
63 static int ahci_skip_host_reset;
64 static int ahci_ignore_sss;
66 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
67 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
69 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
70 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
72 static int ahci_enable_alpm(struct ata_port *ap,
74 static void ahci_disable_alpm(struct ata_port *ap);
75 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
76 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
78 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
84 AHCI_MAX_SG = 168, /* hardware max is 64K */
85 AHCI_DMA_BOUNDARY = 0xffffffff,
88 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
90 AHCI_CMD_TBL_CDB = 0x40,
91 AHCI_CMD_TBL_HDR_SZ = 0x80,
92 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
93 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
94 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
96 AHCI_IRQ_ON_SG = (1 << 31),
97 AHCI_CMD_ATAPI = (1 << 5),
98 AHCI_CMD_WRITE = (1 << 6),
99 AHCI_CMD_PREFETCH = (1 << 7),
100 AHCI_CMD_RESET = (1 << 8),
101 AHCI_CMD_CLR_BUSY = (1 << 10),
103 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
104 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
105 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
108 board_ahci_vt8251 = 1,
109 board_ahci_ign_iferr = 2,
110 board_ahci_sb600 = 3,
112 board_ahci_sb700 = 5, /* for SB700 and SB800 */
113 board_ahci_mcp65 = 6,
114 board_ahci_nopmp = 7,
115 board_ahci_yesncq = 8,
116 board_ahci_nosntf = 9,
118 /* global controller registers */
119 HOST_CAP = 0x00, /* host capabilities */
120 HOST_CTL = 0x04, /* global host control */
121 HOST_IRQ_STAT = 0x08, /* interrupt status */
122 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
123 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
124 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
125 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
126 HOST_CAP2 = 0x24, /* host capabilities, extended */
129 HOST_RESET = (1 << 0), /* reset controller; self-clear */
130 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
131 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
134 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
135 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
136 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
137 HOST_CAP_PART = (1 << 13), /* Partial state capable */
138 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
139 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
140 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
141 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
142 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
143 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
144 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
145 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
146 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
147 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
148 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
149 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
150 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
153 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
154 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
155 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
157 /* registers for each SATA port */
158 PORT_LST_ADDR = 0x00, /* command list DMA addr */
159 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
160 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
161 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
162 PORT_IRQ_STAT = 0x10, /* interrupt status */
163 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
164 PORT_CMD = 0x18, /* port command */
165 PORT_TFDATA = 0x20, /* taskfile data */
166 PORT_SIG = 0x24, /* device TF signature */
167 PORT_CMD_ISSUE = 0x38, /* command issue */
168 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
169 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
170 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
171 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
172 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
174 /* PORT_IRQ_{STAT,MASK} bits */
175 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
176 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
177 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
178 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
179 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
180 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
181 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
182 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
184 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
185 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
186 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
187 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
188 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
189 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
190 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
191 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
192 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
194 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
200 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
202 PORT_IRQ_HBUS_DATA_ERR,
203 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
204 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
205 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
208 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
209 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
210 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
211 PORT_CMD_PMP = (1 << 17), /* PMP attached */
212 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
213 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
214 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
215 PORT_CMD_CLO = (1 << 3), /* Command list override */
216 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
217 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
218 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
220 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
221 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
222 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
223 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
225 /* hpriv->flags bits */
226 AHCI_HFLAG_NO_NCQ = (1 << 0),
227 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
228 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
229 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
230 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
231 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
232 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
233 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
234 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
235 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
236 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
237 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
239 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
243 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
244 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
245 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
248 ICH_MAP = 0x90, /* ICH MAP register */
255 EM_CTL_RST = (1 << 9), /* Reset */
256 EM_CTL_TM = (1 << 8), /* Transmit Message */
257 EM_CTL_ALHD = (1 << 26), /* Activity LED */
260 struct ahci_cmd_hdr {
275 struct ahci_em_priv {
276 enum sw_activity blink_policy;
277 struct timer_list timer;
278 unsigned long saved_activity;
279 unsigned long activity;
280 unsigned long led_state;
283 struct ahci_host_priv {
284 unsigned int flags; /* AHCI_HFLAG_* */
285 u32 cap; /* cap to use */
286 u32 cap2; /* cap2 to use */
287 u32 port_map; /* port map to use */
288 u32 saved_cap; /* saved initial cap */
289 u32 saved_cap2; /* saved initial cap2 */
290 u32 saved_port_map; /* saved initial port_map */
291 u32 em_loc; /* enclosure management location */
294 struct ahci_port_priv {
295 struct ata_link *active_link;
296 struct ahci_cmd_hdr *cmd_slot;
297 dma_addr_t cmd_slot_dma;
299 dma_addr_t cmd_tbl_dma;
301 dma_addr_t rx_fis_dma;
302 /* for NCQ spurious interrupt analysis */
303 unsigned int ncq_saw_d2h:1;
304 unsigned int ncq_saw_dmas:1;
305 unsigned int ncq_saw_sdb:1;
306 u32 intr_mask; /* interrupts to enable */
307 /* enclosure management info per PM slot */
308 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
311 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
312 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
313 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
314 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
315 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
316 static int ahci_port_start(struct ata_port *ap);
317 static void ahci_port_stop(struct ata_port *ap);
318 static void ahci_qc_prep(struct ata_queued_cmd *qc);
319 static void ahci_freeze(struct ata_port *ap);
320 static void ahci_thaw(struct ata_port *ap);
321 static void ahci_pmp_attach(struct ata_port *ap);
322 static void ahci_pmp_detach(struct ata_port *ap);
323 static int ahci_softreset(struct ata_link *link, unsigned int *class,
324 unsigned long deadline);
325 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
326 unsigned long deadline);
327 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
328 unsigned long deadline);
329 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
330 unsigned long deadline);
331 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
332 unsigned long deadline);
333 static void ahci_postreset(struct ata_link *link, unsigned int *class);
334 static void ahci_error_handler(struct ata_port *ap);
335 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
336 static int ahci_port_resume(struct ata_port *ap);
337 static void ahci_dev_config(struct ata_device *dev);
338 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
341 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
342 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
343 static int ahci_pci_device_resume(struct pci_dev *pdev);
345 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
346 static ssize_t ahci_activity_store(struct ata_device *dev,
347 enum sw_activity val);
348 static void ahci_init_sw_activity(struct ata_link *link);
350 static ssize_t ahci_show_host_caps(struct device *dev,
351 struct device_attribute *attr, char *buf);
352 static ssize_t ahci_show_host_cap2(struct device *dev,
353 struct device_attribute *attr, char *buf);
354 static ssize_t ahci_show_host_version(struct device *dev,
355 struct device_attribute *attr, char *buf);
356 static ssize_t ahci_show_port_cmd(struct device *dev,
357 struct device_attribute *attr, char *buf);
359 DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
360 DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
361 DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
362 DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
364 static struct device_attribute *ahci_shost_attrs[] = {
365 &dev_attr_link_power_management_policy,
366 &dev_attr_em_message_type,
367 &dev_attr_em_message,
368 &dev_attr_ahci_host_caps,
369 &dev_attr_ahci_host_cap2,
370 &dev_attr_ahci_host_version,
371 &dev_attr_ahci_port_cmd,
375 static struct device_attribute *ahci_sdev_attrs[] = {
376 &dev_attr_sw_activity,
377 &dev_attr_unload_heads,
381 static struct scsi_host_template ahci_sht = {
382 ATA_NCQ_SHT(DRV_NAME),
383 .can_queue = AHCI_MAX_CMDS - 1,
384 .sg_tablesize = AHCI_MAX_SG,
385 .dma_boundary = AHCI_DMA_BOUNDARY,
386 .shost_attrs = ahci_shost_attrs,
387 .sdev_attrs = ahci_sdev_attrs,
390 static struct ata_port_operations ahci_ops = {
391 .inherits = &sata_pmp_port_ops,
393 .qc_defer = sata_pmp_qc_defer_cmd_switch,
394 .qc_prep = ahci_qc_prep,
395 .qc_issue = ahci_qc_issue,
396 .qc_fill_rtf = ahci_qc_fill_rtf,
398 .freeze = ahci_freeze,
400 .softreset = ahci_softreset,
401 .hardreset = ahci_hardreset,
402 .postreset = ahci_postreset,
403 .pmp_softreset = ahci_softreset,
404 .error_handler = ahci_error_handler,
405 .post_internal_cmd = ahci_post_internal_cmd,
406 .dev_config = ahci_dev_config,
408 .scr_read = ahci_scr_read,
409 .scr_write = ahci_scr_write,
410 .pmp_attach = ahci_pmp_attach,
411 .pmp_detach = ahci_pmp_detach,
413 .enable_pm = ahci_enable_alpm,
414 .disable_pm = ahci_disable_alpm,
415 .em_show = ahci_led_show,
416 .em_store = ahci_led_store,
417 .sw_activity_show = ahci_activity_show,
418 .sw_activity_store = ahci_activity_store,
420 .port_suspend = ahci_port_suspend,
421 .port_resume = ahci_port_resume,
423 .port_start = ahci_port_start,
424 .port_stop = ahci_port_stop,
427 static struct ata_port_operations ahci_vt8251_ops = {
428 .inherits = &ahci_ops,
429 .hardreset = ahci_vt8251_hardreset,
432 static struct ata_port_operations ahci_p5wdh_ops = {
433 .inherits = &ahci_ops,
434 .hardreset = ahci_p5wdh_hardreset,
437 static struct ata_port_operations ahci_sb600_ops = {
438 .inherits = &ahci_ops,
439 .softreset = ahci_sb600_softreset,
440 .pmp_softreset = ahci_sb600_softreset,
443 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
445 static const struct ata_port_info ahci_port_info[] = {
448 .flags = AHCI_FLAG_COMMON,
449 .pio_mask = ATA_PIO4,
450 .udma_mask = ATA_UDMA6,
451 .port_ops = &ahci_ops,
453 [board_ahci_vt8251] =
455 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
456 .flags = AHCI_FLAG_COMMON,
457 .pio_mask = ATA_PIO4,
458 .udma_mask = ATA_UDMA6,
459 .port_ops = &ahci_vt8251_ops,
461 [board_ahci_ign_iferr] =
463 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
464 .flags = AHCI_FLAG_COMMON,
465 .pio_mask = ATA_PIO4,
466 .udma_mask = ATA_UDMA6,
467 .port_ops = &ahci_ops,
471 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
472 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
473 AHCI_HFLAG_32BIT_ONLY),
474 .flags = AHCI_FLAG_COMMON,
475 .pio_mask = ATA_PIO4,
476 .udma_mask = ATA_UDMA6,
477 .port_ops = &ahci_sb600_ops,
481 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
482 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
483 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
484 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
485 .pio_mask = ATA_PIO4,
486 .udma_mask = ATA_UDMA6,
487 .port_ops = &ahci_ops,
489 [board_ahci_sb700] = /* for SB700 and SB800 */
491 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
492 .flags = AHCI_FLAG_COMMON,
493 .pio_mask = ATA_PIO4,
494 .udma_mask = ATA_UDMA6,
495 .port_ops = &ahci_sb600_ops,
499 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
500 .flags = AHCI_FLAG_COMMON,
501 .pio_mask = ATA_PIO4,
502 .udma_mask = ATA_UDMA6,
503 .port_ops = &ahci_ops,
507 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
508 .flags = AHCI_FLAG_COMMON,
509 .pio_mask = ATA_PIO4,
510 .udma_mask = ATA_UDMA6,
511 .port_ops = &ahci_ops,
513 [board_ahci_yesncq] =
515 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
516 .flags = AHCI_FLAG_COMMON,
517 .pio_mask = ATA_PIO4,
518 .udma_mask = ATA_UDMA6,
519 .port_ops = &ahci_ops,
521 [board_ahci_nosntf] =
523 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
524 .flags = AHCI_FLAG_COMMON,
525 .pio_mask = ATA_PIO4,
526 .udma_mask = ATA_UDMA6,
527 .port_ops = &ahci_ops,
531 static const struct pci_device_id ahci_pci_tbl[] = {
533 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
534 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
535 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
536 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
537 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
538 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
539 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
540 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
541 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
542 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
543 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
544 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
545 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
546 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
547 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
548 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
549 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
550 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
551 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
552 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
553 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
554 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
555 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
556 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
557 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
558 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
559 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
560 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
561 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
562 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
563 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
564 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
565 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
566 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
567 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
568 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
569 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
570 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
571 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
572 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
573 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
574 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
575 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
576 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
577 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
578 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
579 { PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
580 { PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
581 { PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
583 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
584 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
585 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
588 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
589 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
590 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
591 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
592 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
593 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
594 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
597 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
598 /* AMD is using RAID class only for ahci controllers */
599 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
600 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
603 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
604 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
607 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
608 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
609 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
610 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
611 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
612 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
613 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
614 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
615 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
616 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
617 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
618 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
619 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
620 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
621 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
622 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
623 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
624 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
625 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
626 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
627 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
628 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
629 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
630 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
631 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
632 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
633 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
634 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
635 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
636 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
637 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
638 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
639 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
640 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
641 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
642 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
643 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
644 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
645 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
646 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
647 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
648 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
649 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
650 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
651 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
652 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
653 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
654 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
655 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
656 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
657 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
658 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
659 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
660 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
661 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
662 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
663 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
664 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
665 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
666 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
667 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
668 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
669 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
670 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
671 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
672 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
673 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
674 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
675 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
678 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
679 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
680 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
683 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
684 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
687 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
689 /* Generic, PCI class code for AHCI */
690 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
691 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
693 { } /* terminate list */
697 static struct pci_driver ahci_pci_driver = {
699 .id_table = ahci_pci_tbl,
700 .probe = ahci_init_one,
701 .remove = ata_pci_remove_one,
703 .suspend = ahci_pci_device_suspend,
704 .resume = ahci_pci_device_resume,
708 static int ahci_em_messages = 1;
709 module_param(ahci_em_messages, int, 0444);
710 /* add other LED protocol types when they become supported */
711 MODULE_PARM_DESC(ahci_em_messages,
712 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
714 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
715 static int marvell_enable;
717 static int marvell_enable = 1;
719 module_param(marvell_enable, int, 0644);
720 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
723 static inline int ahci_nr_ports(u32 cap)
725 return (cap & 0x1f) + 1;
728 static inline void __iomem *__ahci_port_base(struct ata_host *host,
729 unsigned int port_no)
731 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
733 return mmio + 0x100 + (port_no * 0x80);
736 static inline void __iomem *ahci_port_base(struct ata_port *ap)
738 return __ahci_port_base(ap->host, ap->port_no);
741 static void ahci_enable_ahci(void __iomem *mmio)
746 /* turn on AHCI_EN */
747 tmp = readl(mmio + HOST_CTL);
748 if (tmp & HOST_AHCI_EN)
751 /* Some controllers need AHCI_EN to be written multiple times.
752 * Try a few times before giving up.
754 for (i = 0; i < 5; i++) {
756 writel(tmp, mmio + HOST_CTL);
757 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
758 if (tmp & HOST_AHCI_EN)
766 static ssize_t ahci_show_host_caps(struct device *dev,
767 struct device_attribute *attr, char *buf)
769 struct Scsi_Host *shost = class_to_shost(dev);
770 struct ata_port *ap = ata_shost_to_port(shost);
771 struct ahci_host_priv *hpriv = ap->host->private_data;
773 return sprintf(buf, "%x\n", hpriv->cap);
776 static ssize_t ahci_show_host_cap2(struct device *dev,
777 struct device_attribute *attr, char *buf)
779 struct Scsi_Host *shost = class_to_shost(dev);
780 struct ata_port *ap = ata_shost_to_port(shost);
781 struct ahci_host_priv *hpriv = ap->host->private_data;
783 return sprintf(buf, "%x\n", hpriv->cap2);
786 static ssize_t ahci_show_host_version(struct device *dev,
787 struct device_attribute *attr, char *buf)
789 struct Scsi_Host *shost = class_to_shost(dev);
790 struct ata_port *ap = ata_shost_to_port(shost);
791 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
793 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
796 static ssize_t ahci_show_port_cmd(struct device *dev,
797 struct device_attribute *attr, char *buf)
799 struct Scsi_Host *shost = class_to_shost(dev);
800 struct ata_port *ap = ata_shost_to_port(shost);
801 void __iomem *port_mmio = ahci_port_base(ap);
803 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
807 * ahci_save_initial_config - Save and fixup initial config values
808 * @pdev: target PCI device
809 * @hpriv: host private area to store config values
811 * Some registers containing configuration info might be setup by
812 * BIOS and might be cleared on reset. This function saves the
813 * initial values of those registers into @hpriv such that they
814 * can be restored after controller reset.
816 * If inconsistent, config values are fixed up by this function.
/*
 * NOTE(review): sampled excerpt -- each line carries its original file
 * line number and some lines (braces, else-arms) are elided here.
 *
 * ahci_save_initial_config - snapshot HOST_CAP/HOST_CAP2/HOST_PORTS_IMPL
 * at probe time, apply quirk fixups driven by hpriv->flags, and record
 * both the saved_* values (written back after controller reset) and the
 * working values the driver uses during operation.
 */
821 static void ahci_save_initial_config(struct pci_dev *pdev,
822 struct ahci_host_priv *hpriv)
824 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
825 u32 cap, cap2, vers, port_map;
829 /* make sure AHCI mode is enabled before accessing CAP */
830 ahci_enable_ahci(mmio);
832 /* Values prefixed with saved_ are written back to host after
833 * reset. Values without are used for driver operation.
835 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
836 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
838 /* CAP2 register is only defined for AHCI 1.2 and later */
839 vers = readl(mmio + HOST_VERSION);
/* HOST_VERSION: major in the high 16 bits, minor (BCD) in the low 16 */
840 if ((vers >> 16) > 1 ||
841 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
842 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
/* (else-arm elided in excerpt) pre-1.2 controllers get cap2 = 0 */
844 hpriv->saved_cap2 = cap2 = 0;
846 /* some chips have errata preventing 64bit use */
847 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
848 dev_printk(KERN_INFO, &pdev->dev,
849 "controller can't do 64bit DMA, forcing 32bit\n");
/* each block below masks a CAP bit the hardware advertises wrongly */
853 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
854 dev_printk(KERN_INFO, &pdev->dev,
855 "controller can't do NCQ, turning off CAP_NCQ\n");
856 cap &= ~HOST_CAP_NCQ;
859 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
860 dev_printk(KERN_INFO, &pdev->dev,
861 "controller can do NCQ, turning on CAP_NCQ\n");
865 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
866 dev_printk(KERN_INFO, &pdev->dev,
867 "controller can't do PMP, turning off CAP_PMP\n");
868 cap &= ~HOST_CAP_PMP;
871 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
872 dev_printk(KERN_INFO, &pdev->dev,
873 "controller can't do SNTF, turning off CAP_SNTF\n");
874 cap &= ~HOST_CAP_SNTF;
/* JMicron JMB361 quirk: device advertises more ports than it has */
877 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
879 dev_printk(KERN_INFO, &pdev->dev,
880 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
886 * Temporary Marvell 6145 hack: PATA port presence
887 * is asserted through the standard AHCI port
888 * presence register, as bit 4 (counting from 0)
890 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
891 if (pdev->device == 0x6121)
895 dev_printk(KERN_ERR, &pdev->dev,
896 "MV_AHCI HACK: port_map %x -> %x\n",
899 dev_printk(KERN_ERR, &pdev->dev,
900 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
905 /* cross check port_map and cap.n_ports */
909 for (i = 0; i < AHCI_MAX_PORTS; i++)
910 if (port_map & (1 << i))
913 /* If PI has more ports than n_ports, whine, clear
914 * port_map and let it be generated from n_ports.
916 if (map_ports > ahci_nr_ports(cap)) {
917 dev_printk(KERN_WARNING, &pdev->dev,
918 "implemented port map (0x%x) contains more "
919 "ports than nr_ports (%u), using nr_ports\n",
920 port_map, ahci_nr_ports(cap));
925 /* fabricate port_map from cap.nr_ports */
927 port_map = (1 << ahci_nr_ports(cap)) - 1;
928 dev_printk(KERN_WARNING, &pdev->dev,
929 "forcing PORTS_IMPL to 0x%x\n", port_map);
931 /* write the fixed up value to the PI register */
932 hpriv->saved_port_map = port_map;
935 /* record values to use during operation */
938 hpriv->port_map = port_map;
942 * ahci_restore_initial_config - Restore initial config
943 * @host: target ATA host
945 * Restore initial config stored by ahci_save_initial_config().
/* NOTE(review): sampled excerpt; braces/blank lines elided. Writes the
 * saved CAP/CAP2/PORTS_IMPL snapshots back to the HBA (CAP2 only if the
 * controller had one) and flushes with a posted-read. */
950 static void ahci_restore_initial_config(struct ata_host *host)
952 struct ahci_host_priv *hpriv = host->private_data;
953 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
955 writel(hpriv->saved_cap, mmio + HOST_CAP);
956 if (hpriv->saved_cap2)
957 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
958 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
959 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
/* NOTE(review): sampled excerpt; some lines elided.
 * Map a libata SCR index to its per-port MMIO register offset.  Returns
 * the offset for valid indices; SCR_NOTIFICATION is only valid when the
 * HBA advertises HOST_CAP_SNTF.  (The failure return is elided here.) */
962 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
964 static const int offset[] = {
965 [SCR_STATUS] = PORT_SCR_STAT,
966 [SCR_CONTROL] = PORT_SCR_CTL,
967 [SCR_ERROR] = PORT_SCR_ERR,
968 [SCR_ACTIVE] = PORT_SCR_ACT,
969 [SCR_NOTIFICATION] = PORT_SCR_NTF,
971 struct ahci_host_priv *hpriv = ap->host->private_data;
973 if (sc_reg < ARRAY_SIZE(offset) &&
974 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
975 return offset[sc_reg];
/* NOTE(review): sampled excerpt.  Read an SCR register into *val via the
 * offset from ahci_scr_offset(); the offset==0 error path is elided. */
979 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
981 void __iomem *port_mmio = ahci_port_base(link->ap);
982 int offset = ahci_scr_offset(link->ap, sc_reg);
985 *val = readl(port_mmio + offset);
/* NOTE(review): sampled excerpt.  Write val to an SCR register; mirror
 * of ahci_scr_read() above, error path elided. */
991 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
993 void __iomem *port_mmio = ahci_port_base(link->ap);
994 int offset = ahci_scr_offset(link->ap, sc_reg);
997 writel(val, port_mmio + offset);
/* Start the port's DMA engine: set PxCMD.ST (PORT_CMD_START) with a
 * read-modify-write and flush the posted write. */
1003 static void ahci_start_engine(struct ata_port *ap)
1005 void __iomem *port_mmio = ahci_port_base(ap);
1009 tmp = readl(port_mmio + PORT_CMD);
1010 tmp |= PORT_CMD_START;
1011 writel(tmp, port_mmio + PORT_CMD);
1012 readl(port_mmio + PORT_CMD); /* flush */
/* NOTE(review): sampled excerpt; return statements elided.
 * Stop the port's DMA engine: clear PxCMD.ST and poll PxCMD.CR
 * (PORT_CMD_LIST_ON) until it drops, up to 500 ms per the AHCI spec. */
1015 static int ahci_stop_engine(struct ata_port *ap)
1017 void __iomem *port_mmio = ahci_port_base(ap);
1020 tmp = readl(port_mmio + PORT_CMD);
1022 /* check if the HBA is idle */
1023 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1026 /* setting HBA to idle */
1027 tmp &= ~PORT_CMD_START;
1028 writel(tmp, port_mmio + PORT_CMD);
1030 /* wait for engine to stop. This could be as long as 500 msec */
1031 tmp = ata_wait_register(port_mmio + PORT_CMD,
1032 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1033 if (tmp & PORT_CMD_LIST_ON)
/* NOTE(review): sampled excerpt.  Program the command-list and FIS
 * receive-area DMA addresses (high dword only on 64-bit capable HBAs;
 * the double 16-bit shift avoids UB on 32-bit dma_addr_t), then set
 * PxCMD.FRE to enable FIS reception and flush the write. */
1039 static void ahci_start_fis_rx(struct ata_port *ap)
1041 void __iomem *port_mmio = ahci_port_base(ap);
1042 struct ahci_host_priv *hpriv = ap->host->private_data;
1043 struct ahci_port_priv *pp = ap->private_data;
1046 /* set FIS registers */
1047 if (hpriv->cap & HOST_CAP_64)
1048 writel((pp->cmd_slot_dma >> 16) >> 16,
1049 port_mmio + PORT_LST_ADDR_HI);
1050 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1052 if (hpriv->cap & HOST_CAP_64)
1053 writel((pp->rx_fis_dma >> 16) >> 16,
1054 port_mmio + PORT_FIS_ADDR_HI);
1055 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1057 /* enable FIS reception */
1058 tmp = readl(port_mmio + PORT_CMD);
1059 tmp |= PORT_CMD_FIS_RX;
1060 writel(tmp, port_mmio + PORT_CMD);
1063 readl(port_mmio + PORT_CMD);
/* NOTE(review): sampled excerpt; return statements elided.
 * Disable FIS reception (clear PxCMD.FRE) and poll PxCMD.FR
 * (PORT_CMD_FIS_ON) until the receive DMA engine stops. */
1066 static int ahci_stop_fis_rx(struct ata_port *ap)
1068 void __iomem *port_mmio = ahci_port_base(ap);
1071 /* disable FIS reception */
1072 tmp = readl(port_mmio + PORT_CMD);
1073 tmp &= ~PORT_CMD_FIS_RX;
1074 writel(tmp, port_mmio + PORT_CMD);
1076 /* wait for completion, spec says 500ms, give it 1000 */
1077 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1078 PORT_CMD_FIS_ON, 10, 1000);
1079 if (tmp & PORT_CMD_FIS_ON)
/* NOTE(review): sampled excerpt.  Spin up the device (PxCMD.SUD, only
 * meaningful on staggered-spinup capable HBAs) and force the interface
 * into the ACTIVE interface-communication-control state. */
1085 static void ahci_power_up(struct ata_port *ap)
1087 struct ahci_host_priv *hpriv = ap->host->private_data;
1088 void __iomem *port_mmio = ahci_port_base(ap);
1091 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1093 /* spin up device */
1094 if (hpriv->cap & HOST_CAP_SSS) {
1095 cmd |= PORT_CMD_SPIN_UP;
1096 writel(cmd, port_mmio + PORT_CMD);
1100 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
/* NOTE(review): sampled excerpt; some statements (e.g. the 10ms delay)
 * are elided.  Turn off Aggressive Link Power Management: clear
 * PxCMD.ALPE/ASP, force the link ACTIVE, clear stale PhyRdy state from
 * PxIS and SError, re-enable hotplug handling and PhyRdy interrupts. */
1103 static void ahci_disable_alpm(struct ata_port *ap)
1105 struct ahci_host_priv *hpriv = ap->host->private_data;
1106 void __iomem *port_mmio = ahci_port_base(ap);
1108 struct ahci_port_priv *pp = ap->private_data;
1110 /* IPM bits should be disabled by libata-core */
1111 /* get the existing command bits */
1112 cmd = readl(port_mmio + PORT_CMD);
1114 /* disable ALPM and ASP */
1115 cmd &= ~PORT_CMD_ASP;
1116 cmd &= ~PORT_CMD_ALPE;
1118 /* force the interface back to active */
1119 cmd |= PORT_CMD_ICC_ACTIVE;
1121 /* write out new cmd value */
1122 writel(cmd, port_mmio + PORT_CMD);
1123 cmd = readl(port_mmio + PORT_CMD);
1125 /* wait 10ms to be sure we've come out of any low power state */
1128 /* clear out any PhyRdy stuff from interrupt status */
1129 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1131 /* go ahead and clean out PhyRdy Change from Serror too */
/* bits 16 and 18 of SError: PhyRdy change and Comm Wake */
1132 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1135 * Clear flag to indicate that we should ignore all PhyRdy
1138 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1141 * Enable interrupts on Phy Ready.
1143 pp->intr_mask |= PORT_IRQ_PHYRDY;
1144 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1147 * don't change the link pm policy - we can be called
1148 * just to turn of link pm temporarily
/* NOTE(review): sampled excerpt; the switch statement body and several
 * branches/returns are elided.  Enable Aggressive Link Power Management
 * per the requested policy: bail if HOST_CAP_ALPM is absent, mask PhyRdy
 * interrupts (low-power transitions would fire them spuriously), flag
 * hotplug events to be ignored, and set PxCMD.ALPE (+ASP per policy). */
1152 static int ahci_enable_alpm(struct ata_port *ap,
1153 enum link_pm policy)
1155 struct ahci_host_priv *hpriv = ap->host->private_data;
1156 void __iomem *port_mmio = ahci_port_base(ap);
1158 struct ahci_port_priv *pp = ap->private_data;
1161 /* Make sure the host is capable of link power management */
1162 if (!(hpriv->cap & HOST_CAP_ALPM))
1166 case MAX_PERFORMANCE:
1169 * if we came here with NOT_AVAILABLE,
1170 * it just means this is the first time we
1171 * have tried to enable - default to max performance,
1172 * and let the user go to lower power modes on request.
1174 ahci_disable_alpm(ap);
1177 /* configure HBA to enter SLUMBER */
1181 /* configure HBA to enter PARTIAL */
1189 * Disable interrupts on Phy Ready. This keeps us from
1190 * getting woken up due to spurious phy ready interrupts
1191 * TBD - Hot plug should be done via polling now, is
1192 * that even supported?
1194 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1195 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1198 * Set a flag to indicate that we should ignore all PhyRdy
1199 * state changes since these can happen now whenever we
1202 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1204 /* get the existing command bits */
1205 cmd = readl(port_mmio + PORT_CMD);
1208 * Set ASP based on Policy
1213 * Setting this bit will instruct the HBA to aggressively
1214 * enter a lower power link state when it's appropriate and
1215 * based on the value set above for ASP
1217 cmd |= PORT_CMD_ALPE;
1219 /* write out new cmd value */
1220 writel(cmd, port_mmio + PORT_CMD);
1221 cmd = readl(port_mmio + PORT_CMD);
1223 /* IPM bits should be set by libata-core */
/* NOTE(review): sampled excerpt.  Put the port into listen mode when
 * staggered spin-up is supported: zero PxSCTL.DET, then clear
 * PxCMD.SUD.  No-op on HBAs without HOST_CAP_SSS. */
1228 static void ahci_power_down(struct ata_port *ap)
1230 struct ahci_host_priv *hpriv = ap->host->private_data;
1231 void __iomem *port_mmio = ahci_port_base(ap);
1234 if (!(hpriv->cap & HOST_CAP_SSS))
1237 /* put device into listen mode, first set PxSCTL.DET to 0 */
1238 scontrol = readl(port_mmio + PORT_SCR_CTL);
/* (masking of scontrol's DET field is elided in this excerpt) */
1240 writel(scontrol, port_mmio + PORT_SCR_CTL);
1242 /* then set PxCMD.SUD to 0 */
1243 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1244 cmd &= ~PORT_CMD_SPIN_UP;
1245 writel(cmd, port_mmio + PORT_CMD);
/* NOTE(review): sampled excerpt; retry-loop body partially elided.
 * Bring a port up: enable FIS reception, start the DMA engine, restore
 * saved enclosure-management LED state per link (retrying while the EM
 * transmit hardware is busy), and arm software-activity blinking. */
1249 static void ahci_start_port(struct ata_port *ap)
1251 struct ahci_port_priv *pp = ap->private_data;
1252 struct ata_link *link;
1253 struct ahci_em_priv *emp;
1257 /* enable FIS reception */
1258 ahci_start_fis_rx(ap);
1261 ahci_start_engine(ap);
1264 if (ap->flags & ATA_FLAG_EM) {
1265 ata_for_each_link(link, ap, EDGE) {
1266 emp = &pp->em_priv[link->pmp];
1268 /* EM Transmit bit maybe busy during init */
1269 for (i = 0; i < EM_MAX_RETRY; i++) {
1270 rc = ahci_transmit_led_message(ap,
1281 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1282 ata_for_each_link(link, ap, EDGE)
1283 ahci_init_sw_activity(link);
/* NOTE(review): sampled excerpt; returns elided.  Quiesce a port by
 * stopping the DMA engine then FIS reception; on failure *emsg is set
 * to a human-readable description for the caller to log. */
1287 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1292 rc = ahci_stop_engine(ap);
1294 *emsg = "failed to stop engine";
1298 /* disable FIS reception */
1299 rc = ahci_stop_fis_rx(ap);
1301 *emsg = "failed stop FIS RX";
/* NOTE(review): sampled excerpt; several returns/braces elided.
 * Global controller reset: ensure AHCI mode, set HOST_CTL.HR and poll
 * up to 1 s for the HBA to clear it, re-enable AHCI mode and restore
 * the saved initial config (reset clears some registers).  On Intel
 * parts also re-enable ports in PCI config word 0x92 to match port_map.
 * Reset is skipped entirely when the ahci_skip_host_reset modparam set. */
1308 static int ahci_reset_controller(struct ata_host *host)
1310 struct pci_dev *pdev = to_pci_dev(host->dev);
1311 struct ahci_host_priv *hpriv = host->private_data;
1312 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1315 /* we must be in AHCI mode, before using anything
1316 * AHCI-specific, such as HOST_RESET.
1318 ahci_enable_ahci(mmio);
1320 /* global controller reset */
1321 if (!ahci_skip_host_reset) {
1322 tmp = readl(mmio + HOST_CTL);
1323 if ((tmp & HOST_RESET) == 0) {
1324 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1325 readl(mmio + HOST_CTL); /* flush */
1329 * to perform host reset, OS should set HOST_RESET
1330 * and poll until this bit is read to be "0".
1331 * reset must complete within 1 second, or
1332 * the hardware should be considered fried.
1334 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1335 HOST_RESET, 10, 1000);
1337 if (tmp & HOST_RESET) {
1338 dev_printk(KERN_ERR, host->dev,
1339 "controller reset failed (0x%x)\n", tmp);
1343 /* turn on AHCI mode */
1344 ahci_enable_ahci(mmio);
1346 /* Some registers might be cleared on reset. Restore
1349 ahci_restore_initial_config(host);
1351 dev_printk(KERN_INFO, host->dev,
1352 "skipping global host reset\n");
1354 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
/* Intel PCS quirk: port-enable bits live in PCI config word 0x92 */
1358 pci_read_config_word(pdev, 0x92, &tmp16);
1359 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1360 tmp16 |= hpriv->port_map;
1361 pci_write_config_word(pdev, 0x92, tmp16);
/* NOTE(review): sampled excerpt; the activity-counter increment is
 * elided.  Record I/O activity on a link and (re)arm the 10 ms blink
 * timer, but only for links with software-activity LED enabled. */
1368 static void ahci_sw_activity(struct ata_link *link)
1370 struct ata_port *ap = link->ap;
1371 struct ahci_port_priv *pp = ap->private_data;
1372 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1374 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1378 if (!timer_pending(&emp->timer))
1379 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
/* NOTE(review): sampled excerpt; some branch lines elided.
 * Timer callback for software-activity LED blinking.  Under ap->lock:
 * if new activity was seen since the last tick, toggle the LED and
 * rearm the timer for 100 ms; otherwise settle the LED to the idle
 * state dictated by blink_policy.  The EM message is transmitted after
 * the lock is dropped. */
1382 static void ahci_sw_activity_blink(unsigned long arg)
1384 struct ata_link *link = (struct ata_link *)arg;
1385 struct ata_port *ap = link->ap;
1386 struct ahci_port_priv *pp = ap->private_data;
1387 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1388 unsigned long led_message = emp->led_state;
1389 u32 activity_led_state;
1390 unsigned long flags;
1392 led_message &= EM_MSG_LED_VALUE;
/* low byte = HBA port, next byte = PMP slot (see EM_MSG_LED_* masks) */
1393 led_message |= ap->port_no | (link->pmp << 8);
1395 /* check to see if we've had activity. If so,
1396 * toggle state of LED and reset timer. If not,
1397 * turn LED to desired idle state.
1399 spin_lock_irqsave(ap->lock, flags);
1400 if (emp->saved_activity != emp->activity) {
1401 emp->saved_activity = emp->activity;
1402 /* get the current LED state */
1403 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1405 if (activity_led_state)
1406 activity_led_state = 0;
1408 activity_led_state = 1;
1410 /* clear old state */
1411 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1414 led_message |= (activity_led_state << 16);
1415 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1417 /* switch to idle */
1418 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1419 if (emp->blink_policy == BLINK_OFF)
1420 led_message |= (1 << 16);
1422 spin_unlock_irqrestore(ap->lock, flags);
1423 ahci_transmit_led_message(ap, led_message, 4);
/* Initialize software-activity LED state for a link: zero the activity
 * counters, bind the blink timer to ahci_sw_activity_blink(), and set
 * ATA_LFLAG_SW_ACTIVITY if a blink policy is configured. */
1426 static void ahci_init_sw_activity(struct ata_link *link)
1428 struct ata_port *ap = link->ap;
1429 struct ahci_port_priv *pp = ap->private_data;
1430 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1432 /* init activity stats, setup timer */
1433 emp->saved_activity = emp->activity = 0;
1434 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1436 /* check our blink policy and set flag for link if it's enabled */
1437 if (emp->blink_policy)
1438 link->flags |= ATA_LFLAG_SW_ACTIVITY;
/* NOTE(review): sampled excerpt; returns elided.  Reset the enclosure
 * management controller via EM_CTL.RST, bailing out if a transmit or
 * reset is already in progress. */
1441 static int ahci_reset_em(struct ata_host *host)
1443 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1446 em_ctl = readl(mmio + HOST_EM_CTL);
1447 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1450 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
/* NOTE(review): sampled excerpt; some lines (returns, em_ctl read
 * before transmit) elided.  Send a two-dword LED message through the
 * enclosure-management transmit buffer at hpriv->em_loc: header dword
 * carries the 4-byte message size, payload carries the LED state with
 * the port number substituted in.  Serialized under ap->lock; fails if
 * a previous transmit (EM_CTL.TM) is still busy.  The new LED state is
 * cached in emp->led_state for later restore/show. */
1454 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1457 struct ahci_host_priv *hpriv = ap->host->private_data;
1458 struct ahci_port_priv *pp = ap->private_data;
1459 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1461 u32 message[] = {0, 0};
1462 unsigned long flags;
1464 struct ahci_em_priv *emp;
1466 /* get the slot number from the message */
1467 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1468 if (pmp < EM_MAX_SLOTS)
1469 emp = &pp->em_priv[pmp];
1473 spin_lock_irqsave(ap->lock, flags);
1476 * if we are still busy transmitting a previous message,
1479 em_ctl = readl(mmio + HOST_EM_CTL);
1480 if (em_ctl & EM_CTL_TM) {
1481 spin_unlock_irqrestore(ap->lock, flags);
1486 * create message header - this is all zero except for
1487 * the message size, which is 4 bytes.
1489 message[0] |= (4 << 8);
1491 /* ignore 0:4 of byte zero, fill in port info yourself */
1492 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1494 /* write message to EM_LOC */
1495 writel(message[0], mmio + hpriv->em_loc);
1496 writel(message[1], mmio + hpriv->em_loc+4);
1498 /* save off new led state for port/slot */
1499 emp->led_state = state;
1502 * tell hardware to transmit the message
1504 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1506 spin_unlock_irqrestore(ap->lock, flags);
/* NOTE(review): sampled excerpt; return elided.  sysfs show: print the
 * cached LED state of each link on the port.  NOTE(review): sprintf()
 * writes to the start of buf on every iteration -- looks like each
 * link overwrites the previous one while rc accumulates; verify against
 * the full file whether buf should be advanced (buf + rc). */
1510 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1512 struct ahci_port_priv *pp = ap->private_data;
1513 struct ata_link *link;
1514 struct ahci_em_priv *emp;
1517 ata_for_each_link(link, ap, EDGE) {
1518 emp = &pp->em_priv[link->pmp];
1519 rc += sprintf(buf, "%lx\n", emp->led_state);
/* NOTE(review): sampled excerpt; the invalid-pmp path is elided.
 * sysfs store: parse an LED state word from userspace, locate the EM
 * slot addressed by its PMP field, strip the activity bits when
 * software-activity blinking owns them, and transmit the message. */
1524 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1529 struct ahci_port_priv *pp = ap->private_data;
1530 struct ahci_em_priv *emp;
1532 state = simple_strtoul(buf, NULL, 0);
1534 /* get the slot number from the message */
1535 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1536 if (pmp < EM_MAX_SLOTS)
1537 emp = &pp->em_priv[pmp];
1541 /* mask off the activity bits if we are in sw_activity
1542 * mode, user should turn off sw_activity before setting
1543 * activity led through em_message
1545 if (emp->blink_policy)
1546 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1548 return ahci_transmit_led_message(ap, state, size);
/* NOTE(review): sampled excerpt; the OFF/else branch structure and
 * return are partially elided.  Change the software-activity LED policy
 * for a device's link: OFF clears ATA_LFLAG_SW_ACTIVITY and turns the
 * LED off; otherwise the flag is set and, for BLINK_OFF, the LED is
 * left on while idle.  The chosen policy is saved in emp->blink_policy. */
1551 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1553 struct ata_link *link = dev->link;
1554 struct ata_port *ap = link->ap;
1555 struct ahci_port_priv *pp = ap->private_data;
1556 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1557 u32 port_led_state = emp->led_state;
1559 /* save the desired Activity LED behavior */
1562 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1564 /* set the LED to OFF */
1565 port_led_state &= EM_MSG_LED_VALUE_OFF;
1566 port_led_state |= (ap->port_no | (link->pmp << 8));
1567 ahci_transmit_led_message(ap, port_led_state, 4);
1569 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1570 if (val == BLINK_OFF) {
1571 /* set LED to ON for idle */
1572 port_led_state &= EM_MSG_LED_VALUE_OFF;
1573 port_led_state |= (ap->port_no | (link->pmp << 8));
1574 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1575 ahci_transmit_led_message(ap, port_led_state, 4);
1578 emp->blink_policy = val;
/* sysfs show: report the saved software-activity blink policy for the
 * device's link. */
1582 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1584 struct ata_link *link = dev->link;
1585 struct ata_port *ap = link->ap;
1586 struct ahci_port_priv *pp = ap->private_data;
1587 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1589 /* display the saved value of activity behavior for this
1592 return sprintf(buf, "%d\n", emp->blink_policy);
/* NOTE(review): sampled excerpt.  Per-port init: deinit (quiesce) the
 * port, then clear SError, the port's interrupt status, and finally the
 * port's bit in the global HOST_IRQ_STAT (write-1-to-clear registers). */
1595 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1596 int port_no, void __iomem *mmio,
1597 void __iomem *port_mmio)
1599 const char *emsg = NULL;
1603 /* make sure port is not active */
1604 rc = ahci_deinit_port(ap, &emsg);
1606 dev_printk(KERN_WARNING, &pdev->dev,
1607 "%s (%d)\n", emsg, rc);
1610 tmp = readl(port_mmio + PORT_SCR_ERR);
1611 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1612 writel(tmp, port_mmio + PORT_SCR_ERR);
1614 /* clear port IRQ */
1615 tmp = readl(port_mmio + PORT_IRQ_STAT);
1616 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1618 writel(tmp, port_mmio + PORT_IRQ_STAT);
1620 writel(1 << port_no, mmio + HOST_IRQ_STAT);
/* NOTE(review): sampled excerpt; the Marvell port-number selection and
 * some braces are elided.  Controller-wide init: on Marvell PATA-hack
 * parts, mask and clear the hidden PATA port's IRQs; init every
 * non-dummy port; then set HOST_IRQ_EN in HOST_CTL to enable global
 * interrupt delivery. */
1623 static void ahci_init_controller(struct ata_host *host)
1625 struct ahci_host_priv *hpriv = host->private_data;
1626 struct pci_dev *pdev = to_pci_dev(host->dev);
1627 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1629 void __iomem *port_mmio;
1633 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1634 if (pdev->device == 0x6121)
1638 port_mmio = __ahci_port_base(host, mv);
1640 writel(0, port_mmio + PORT_IRQ_MASK);
1642 /* clear port IRQ */
1643 tmp = readl(port_mmio + PORT_IRQ_STAT);
1644 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1646 writel(tmp, port_mmio + PORT_IRQ_STAT);
1649 for (i = 0; i < host->n_ports; i++) {
1650 struct ata_port *ap = host->ports[i];
1652 port_mmio = ahci_port_base(ap);
1653 if (ata_port_is_dummy(ap))
1656 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1659 tmp = readl(mmio + HOST_CTL);
1660 VPRINTK("HOST_CTL 0x%x\n", tmp);
1661 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1662 tmp = readl(mmio + HOST_CTL);
1663 VPRINTK("HOST_CTL 0x%x\n", tmp);
/* Per-device config hook: on SB600-class controllers (SECT255 quirk),
 * cap transfers at 255 sectors per command. */
1666 static void ahci_dev_config(struct ata_device *dev)
1668 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1670 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1671 dev->max_sectors = 255;
1672 ata_dev_printk(dev, KERN_INFO,
1673 "SB600 AHCI: limiting to 255 sectors per cmd\n");
/* Classify the attached device by unpacking the initial D2H signature
 * from PORT_SIG into a taskfile and handing it to ata_dev_classify(). */
1677 static unsigned int ahci_dev_classify(struct ata_port *ap)
1679 void __iomem *port_mmio = ahci_port_base(ap);
1680 struct ata_taskfile tf;
1683 tmp = readl(port_mmio + PORT_SIG);
1684 tf.lbah = (tmp >> 24) & 0xff;
1685 tf.lbam = (tmp >> 16) & 0xff;
1686 tf.lbal = (tmp >> 8) & 0xff;
1687 tf.nsect = (tmp) & 0xff;
1689 return ata_dev_classify(&tf);
/* Fill command-list slot 'tag': options word, cleared status, and the
 * 64-bit DMA address of the tag's command table (split into low/high
 * dwords; the double 16-bit shift avoids UB on 32-bit dma_addr_t). */
1692 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1695 dma_addr_t cmd_tbl_dma;
1697 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1699 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1700 pp->cmd_slot[tag].status = 0;
1701 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1702 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
/* NOTE(review): sampled excerpt; some returns/labels elided.
 * Kick the port out of a stuck state: stop the engine, then issue a
 * Command List Override (PxCMD.CLO) if the device is BSY/DRQ or a PMP
 * is attached (AHCI-1.3 9.2 requires CLO with PMP), wait for CLO to
 * self-clear, and restart the engine. */
1705 static int ahci_kick_engine(struct ata_port *ap)
1707 void __iomem *port_mmio = ahci_port_base(ap);
1708 struct ahci_host_priv *hpriv = ap->host->private_data;
1709 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1714 rc = ahci_stop_engine(ap);
1719 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1721 busy = status & (ATA_BUSY | ATA_DRQ);
1722 if (!busy && !sata_pmp_attached(ap)) {
1727 if (!(hpriv->cap & HOST_CAP_CLO)) {
1733 tmp = readl(port_mmio + PORT_CMD);
1734 tmp |= PORT_CMD_CLO;
1735 writel(tmp, port_mmio + PORT_CMD);
1738 tmp = ata_wait_register(port_mmio + PORT_CMD,
1739 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1740 if (tmp & PORT_CMD_CLO)
1743 /* restart engine */
1745 ahci_start_engine(ap);
/* NOTE(review): sampled excerpt; the timeout branch and returns are
 * elided.  Build a command FIS in slot 0 of the command table, issue it
 * via PORT_CMD_ISSUE, and (when timeout_msec is set) poll for the issue
 * bit to clear, kicking the engine on timeout. */
1749 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1750 struct ata_taskfile *tf, int is_cmd, u16 flags,
1751 unsigned long timeout_msec)
1753 const u32 cmd_fis_len = 5; /* five dwords */
1754 struct ahci_port_priv *pp = ap->private_data;
1755 void __iomem *port_mmio = ahci_port_base(ap);
1756 u8 *fis = pp->cmd_tbl;
1759 /* prep the command */
1760 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1761 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1764 writel(1, port_mmio + PORT_CMD_ISSUE);
1767 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1770 ahci_kick_engine(ap);
1774 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
/* NOTE(review): sampled excerpt; goto labels, SRST ctl setup and some
 * returns are elided.  AHCI softreset (AHCI-1.1 10.4.1): kick the
 * engine, send the SRST-assert D2H register FIS (bounded by the EH
 * deadline), sleep briefly, send the SRST-deassert FIS, then wait for
 * readiness via the caller-supplied check_ready().  With the
 * SRST_TOUT_IS_OFFLINE quirk a readiness timeout is treated as
 * link-offline instead of an error.  On success *class is set from the
 * device signature. */
1779 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1780 int pmp, unsigned long deadline,
1781 int (*check_ready)(struct ata_link *link))
1783 struct ata_port *ap = link->ap;
1784 struct ahci_host_priv *hpriv = ap->host->private_data;
1785 const char *reason = NULL;
1786 unsigned long now, msecs;
1787 struct ata_taskfile tf;
1792 /* prepare for SRST (AHCI-1.1 10.4.1) */
1793 rc = ahci_kick_engine(ap);
1794 if (rc && rc != -EOPNOTSUPP)
1795 ata_link_printk(link, KERN_WARNING,
1796 "failed to reset engine (errno=%d)\n", rc);
1798 ata_tf_init(link->device, &tf);
1800 /* issue the first D2H Register FIS */
1803 if (time_after(now, deadline))
1804 msecs = jiffies_to_msecs(deadline - now);
1807 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1808 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1810 reason = "1st FIS failed";
1814 /* spec says at least 5us, but be generous and sleep for 1ms */
1817 /* issue the second D2H Register FIS */
1818 tf.ctl &= ~ATA_SRST;
1819 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1821 /* wait for link to become ready */
1822 rc = ata_wait_after_reset(link, deadline, check_ready);
1823 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1825 * Workaround for cases where link online status can't
1826 * be trusted. Treat device readiness timeout as link
1829 ata_link_printk(link, KERN_INFO,
1830 "device not ready, treating as offline\n");
1831 *class = ATA_DEV_NONE;
1833 /* link occupied, -ENODEV too is an error */
1834 reason = "device not ready";
1837 *class = ahci_dev_classify(ap);
1839 DPRINTK("EXIT, class=%u\n", *class);
1843 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
/* Readiness predicate for softreset: read the shadow taskfile status
 * byte from PORT_TFDATA and let ata_check_ready() interpret it. */
1847 static int ahci_check_ready(struct ata_link *link)
1849 void __iomem *port_mmio = ahci_port_base(link->ap);
1850 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1852 return ata_check_ready(status);
/* Standard softreset entry point: resolve the SRST PMP target and
 * delegate to ahci_do_softreset() with the default readiness check. */
1855 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1856 unsigned long deadline)
1858 int pmp = sata_srst_pmp(link);
1862 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
/* NOTE(review): sampled excerpt; the BAD_PMP return value is elided.
 * SB600 readiness check: short-circuit when PORT_IRQ_BAD_PMP is set
 * (HW bug makes TFDATA untrustworthy then), else defer to
 * ata_check_ready() on the TFDATA status byte. */
1865 static int ahci_sb600_check_ready(struct ata_link *link)
1867 void __iomem *port_mmio = ahci_port_base(link->ap);
1868 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1869 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1872 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1873 * which can save timeout delay.
1875 if (irq_status & PORT_IRQ_BAD_PMP)
1878 return ata_check_ready(status);
/* NOTE(review): sampled excerpt; IRQ-clear and return lines elided.
 * SB600/ATI softreset: run the normal softreset with the SB600
 * readiness check; if it tripped PORT_IRQ_BAD_PMP (chip erratum when a
 * plain HDD/ODD sits behind the port with PMP enabled), retry the
 * softreset addressed to PMP port 0. */
1881 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1882 unsigned long deadline)
1884 struct ata_port *ap = link->ap;
1885 void __iomem *port_mmio = ahci_port_base(ap);
1886 int pmp = sata_srst_pmp(link);
1892 rc = ahci_do_softreset(link, class, pmp, deadline,
1893 ahci_sb600_check_ready);
1896 * Soft reset fails on some ATI chips with IPMS set when PMP
1897 * is enabled but SATA HDD/ODD is connected to SATA port,
1898 * do soft reset again to port 0.
1901 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1902 if (irq_sts & PORT_IRQ_BAD_PMP) {
1903 ata_link_printk(link, KERN_WARNING,
1904 "applying SB600 PMP SRST workaround "
1906 rc = ahci_do_softreset(link, class, 0, deadline,
/* NOTE(review): sampled excerpt; the D2H-area clear, online handling
 * and return are partially elided.  Standard AHCI hardreset: stop the
 * engine, prime the rx_fis D2H area with an inverted-BSY taskfile so
 * readiness can be detected, perform the SATA phy hardreset, restart
 * the engine, and classify from the received signature when online. */
1914 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1915 unsigned long deadline)
1917 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1918 struct ata_port *ap = link->ap;
1919 struct ahci_port_priv *pp = ap->private_data;
1920 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1921 struct ata_taskfile tf;
1927 ahci_stop_engine(ap);
1929 /* clear D2H reception area to properly wait for D2H FIS */
1930 ata_tf_init(link->device, &tf);
1932 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1934 rc = sata_link_hardreset(link, timing, deadline, &online,
1937 ahci_start_engine(ap);
1940 *class = ahci_dev_classify(ap);
1942 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
/* VIA VT8251 hardreset: standard phy hardreset, but the chip doesn't
 * clear BSY on signature FIS reception, so when the link comes up we
 * return -EAGAIN to request a follow-up softreset from EH. */
1946 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1947 unsigned long deadline)
1949 struct ata_port *ap = link->ap;
1955 ahci_stop_engine(ap);
1957 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1958 deadline, &online, NULL);
1960 ahci_start_engine(ap);
1962 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1964 /* vt8251 doesn't clear BSY on signature FIS reception,
1965 * request follow-up softreset.
1967 return online ? -EAGAIN : rc;
/* NOTE(review): sampled excerpt; online handling and return elided.
 * ASUS P5W-DH Deluxe hardreset workaround for the SIMG4726 pseudo
 * device: after the phy hardreset, wait only ~2 s for !BSY and, if the
 * wait fails, kick the engine (CLO) so probing can continue via
 * ATA_LFLAG_NO_SRST / ATA_LFLAG_ASSUME_ATA (see comment below). */
1970 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1971 unsigned long deadline)
1973 struct ata_port *ap = link->ap;
1974 struct ahci_port_priv *pp = ap->private_data;
1975 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1976 struct ata_taskfile tf;
1980 ahci_stop_engine(ap);
1982 /* clear D2H reception area to properly wait for D2H FIS */
1983 ata_tf_init(link->device, &tf);
1985 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1987 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1988 deadline, &online, NULL);
1990 ahci_start_engine(ap);
1992 /* The pseudo configuration device on SIMG4726 attached to
1993 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1994 * hardreset if no device is attached to the first downstream
1995 * port && the pseudo device locks up on SRST w/ PMP==0. To
1996 * work around this, wait for !BSY only briefly. If BSY isn't
1997 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1998 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2000 * Wait for two seconds. Devices attached to downstream port
2001 * which can't process the following IDENTIFY after this will
2002 * have to be reset again. For most cases, this should
2003 * suffice while making probing snappish enough.
2006 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2009 ahci_kick_engine(ap);
/* Post-reset hook: run the standard libata postreset, then make the
 * port's PxCMD ATAPI bit match the classified device type, writing it
 * back (with a flushing read) only when it actually changed. */
2014 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2016 struct ata_port *ap = link->ap;
2017 void __iomem *port_mmio = ahci_port_base(ap);
2020 ata_std_postreset(link, class);
2022 /* Make sure port's ATAPI bit is set appropriately */
2023 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2024 if (*class == ATA_DEV_ATAPI)
2025 new_tmp |= PORT_CMD_ATAPI;
2027 new_tmp &= ~PORT_CMD_ATAPI;
2028 if (new_tmp != tmp) {
2029 writel(new_tmp, port_mmio + PORT_CMD);
2030 readl(port_mmio + PORT_CMD); /* flush */
/* NOTE(review): sampled excerpt; the return of the element count is
 * elided.  Translate the qc's scatterlist into AHCI PRD entries after
 * the command-table header: 64-bit address split into two dwords and
 * length encoded as (bytes - 1) per the AHCI PRDT format. */
2034 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2036 struct scatterlist *sg;
2037 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2043 * Next, the S/G list.
2045 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2046 dma_addr_t addr = sg_dma_address(sg);
2047 u32 sg_len = sg_dma_len(sg);
2049 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2050 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2051 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
/* NOTE(review): sampled excerpt; the is_atapi guard around the CDB copy
 * and the n_elem default are elided.  Prepare a queued command: build
 * the H2D command FIS (and, for ATAPI, the CDB) in the tag's command
 * table, fill the PRDT from the scatterlist when DMA-mapped, then
 * populate the command slot with length/PMP/direction/ATAPI options. */
2057 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2059 struct ata_port *ap = qc->ap;
2060 struct ahci_port_priv *pp = ap->private_data;
2061 int is_atapi = ata_is_atapi(qc->tf.protocol);
2064 const u32 cmd_fis_len = 5; /* five dwords */
2065 unsigned int n_elem;
2068 * Fill in command table information. First, the header,
2069 * a SATA Register - Host to Device command FIS.
2071 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2073 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
2075 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2076 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2080 if (qc->flags & ATA_QCFLAG_DMAMAP)
2081 n_elem = ahci_fill_sg(qc, cmd_tbl);
2084 * Fill in command slot information.
2086 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2087 if (qc->tf.flags & ATA_TFLAG_WRITE)
2088 opts |= AHCI_CMD_WRITE;
2090 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2092 ahci_fill_cmd_slot(pp, qc->tag, opts);
/* NOTE(review): sampled excerpt; some braces/else lines and the final
 * abort path are elided.  Decode an error interrupt: find the active
 * link/qc, record irq_stat and SError in the EH info (SError must be
 * cleared or the HBA can lock up), translate each PORT_IRQ_* error bit
 * into err_mask/action on the appropriate EH context, and freeze the
 * port for fatal (PORT_IRQ_FREEZE) conditions. */
2095 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2097 struct ahci_host_priv *hpriv = ap->host->private_data;
2098 struct ahci_port_priv *pp = ap->private_data;
2099 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2100 struct ata_link *link = NULL;
2101 struct ata_queued_cmd *active_qc;
2102 struct ata_eh_info *active_ehi;
2105 /* determine active link */
2106 ata_for_each_link(link, ap, EDGE)
2107 if (ata_link_active(link))
2112 active_qc = ata_qc_from_tag(ap, link->active_tag);
2113 active_ehi = &link->eh_info;
2115 /* record irq stat */
2116 ata_ehi_clear_desc(host_ehi);
2117 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2119 /* AHCI needs SError cleared; otherwise, it might lock up */
2120 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2121 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2122 host_ehi->serror |= serror;
2124 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2125 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2126 irq_stat &= ~PORT_IRQ_IF_ERR;
2128 if (irq_stat & PORT_IRQ_TF_ERR) {
2129 /* If qc is active, charge it; otherwise, the active
2130 * link. There's no active qc on NCQ errors. It will
2131 * be determined by EH by reading log page 10h.
2134 active_qc->err_mask |= AC_ERR_DEV;
2136 active_ehi->err_mask |= AC_ERR_DEV;
2138 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2139 host_ehi->serror &= ~SERR_INTERNAL;
2142 if (irq_stat & PORT_IRQ_UNK_FIS) {
2143 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2145 active_ehi->err_mask |= AC_ERR_HSM;
2146 active_ehi->action |= ATA_EH_RESET;
2147 ata_ehi_push_desc(active_ehi,
2148 "unknown FIS %08x %08x %08x %08x" ,
2149 unk[0], unk[1], unk[2], unk[3]);
2152 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2153 active_ehi->err_mask |= AC_ERR_HSM;
2154 active_ehi->action |= ATA_EH_RESET;
2155 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2158 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2159 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2160 host_ehi->action |= ATA_EH_RESET;
2161 ata_ehi_push_desc(host_ehi, "host bus error");
2164 if (irq_stat & PORT_IRQ_IF_ERR) {
2165 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2166 host_ehi->action |= ATA_EH_RESET;
2167 ata_ehi_push_desc(host_ehi, "interface fatal error");
2170 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2171 ata_ehi_hotplugged(host_ehi);
2172 ata_ehi_push_desc(host_ehi, "%s",
2173 irq_stat & PORT_IRQ_CONNECT ?
2174 "connection status changed" : "PHY RDY changed");
2177 /* okay, let's hand over to EH */
2179 if (irq_stat & PORT_IRQ_FREEZE)
2180 ata_port_freeze(ap);
/* NOTE(review): sampled excerpt; some braces/returns elided.
 * Per-port interrupt handler: ack PxIS, filter BAD_PMP during reset and
 * PhyRdy while hotplug is suppressed (ALPM), dispatch errors to
 * ahci_error_intr(), handle SDB-FIS asynchronous notification (via
 * SNotification when supported, else by snooping the received SDB FIS),
 * then complete finished commands from PxSACT (NCQ) or PxCI.  Invalid
 * completions freeze the port unless a reset is in progress. */
2185 static void ahci_port_intr(struct ata_port *ap)
2187 void __iomem *port_mmio = ahci_port_base(ap);
2188 struct ata_eh_info *ehi = &ap->link.eh_info;
2189 struct ahci_port_priv *pp = ap->private_data;
2190 struct ahci_host_priv *hpriv = ap->host->private_data;
2191 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2192 u32 status, qc_active;
2195 status = readl(port_mmio + PORT_IRQ_STAT);
2196 writel(status, port_mmio + PORT_IRQ_STAT);
2198 /* ignore BAD_PMP while resetting */
2199 if (unlikely(resetting))
2200 status &= ~PORT_IRQ_BAD_PMP;
2202 /* If we are getting PhyRdy, this is
2203 * just a power state change, we should
2204 * clear out this, plus the PhyRdy/Comm
2205 * Wake bits from Serror
2207 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2208 (status & PORT_IRQ_PHYRDY)) {
2209 status &= ~PORT_IRQ_PHYRDY;
2210 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2213 if (unlikely(status & PORT_IRQ_ERROR)) {
2214 ahci_error_intr(ap, status);
2218 if (status & PORT_IRQ_SDB_FIS) {
2219 /* If SNotification is available, leave notification
2220 * handling to sata_async_notification(). If not,
2221 * emulate it by snooping SDB FIS RX area.
2223 * Snooping FIS RX area is probably cheaper than
2224 * poking SNotification but some constrollers which
2225 * implement SNotification, ICH9 for example, don't
2226 * store AN SDB FIS into receive area.
2228 if (hpriv->cap & HOST_CAP_SNTF)
2229 sata_async_notification(ap);
2231 /* If the 'N' bit in word 0 of the FIS is set,
2232 * we just received asynchronous notification.
2233 * Tell libata about it.
2235 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2236 u32 f0 = le32_to_cpu(f[0]);
2239 sata_async_notification(ap);
2243 /* pp->active_link is valid iff any command is in flight */
2244 if (ap->qc_active && pp->active_link->sactive)
2245 qc_active = readl(port_mmio + PORT_SCR_ACT);
2247 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2249 rc = ata_qc_complete_multiple(ap, qc_active);
2251 /* while resetting, invalid completions are expected */
2252 if (unlikely(rc < 0 && !resetting)) {
2253 ehi->err_mask |= AC_ERR_HSM;
2254 ehi->action |= ATA_EH_RESET;
2255 ata_port_freeze(ap);
/*
 * ahci_interrupt - top-level shared IRQ handler for the whole HBA
 *
 * Reads HOST_IRQ_STAT, dispatches each pending port bit to
 * ahci_port_intr() under the host lock, then clears HOST_IRQ_STAT
 * last (it is a level-triggered latch; see comment below).
 */
2259 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2261 struct ata_host *host = dev_instance;
2262 struct ahci_host_priv *hpriv;
2263 unsigned int i, handled = 0;
2265 u32 irq_stat, irq_masked;
2269 hpriv = host->private_data;
2270 mmio = host->iomap[AHCI_PCI_BAR];
2272 /* sigh. 0xffffffff is a valid return from h/w */
2273 irq_stat = readl(mmio + HOST_IRQ_STAT);
/* only service ports that actually exist in this HBA's port map */
2277 irq_masked = irq_stat & hpriv->port_map;
2279 spin_lock(&host->lock);
2281 for (i = 0; i < host->n_ports; i++) {
2282 struct ata_port *ap;
2284 if (!(irq_masked & (1 << i)))
2287 ap = host->ports[i];
2290 VPRINTK("port %u\n", i);
2292 VPRINTK("port %u (no irq)\n", i);
2293 if (ata_ratelimit())
2294 dev_printk(KERN_WARNING, host->dev,
2295 "interrupt on disabled port %u\n", i);
2301 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
2302 * it should be cleared after all the port events are cleared;
2303 * otherwise, it will raise a spurious interrupt after each
2304 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2307 * Also, use the unmasked value to clear interrupt as spurious
2308 * pending event on a dummy port might cause screaming IRQ.
2310 writel(irq_stat, mmio + HOST_IRQ_STAT);
2312 spin_unlock(&host->lock);
2316 return IRQ_RETVAL(handled);
/*
 * ahci_qc_issue - issue a prepared command to the hardware
 *
 * Records the issuing link in pp->active_link (used by the completion
 * path to decide whether to read SActive or CI), sets the tag bit in
 * PORT_SCR_ACT for NCQ commands, then triggers execution via
 * PORT_CMD_ISSUE and kicks software activity LED handling.
 */
2319 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2321 struct ata_port *ap = qc->ap;
2322 void __iomem *port_mmio = ahci_port_base(ap);
2323 struct ahci_port_priv *pp = ap->private_data;
2325 /* Keep track of the currently active link. It will be used
2326 * in completion path to determine whether NCQ phase is in
2329 pp->active_link = qc->dev->link;
2331 if (qc->tf.protocol == ATA_PROT_NCQ)
2332 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2333 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2335 ahci_sw_activity(qc->dev->link);
/*
 * ahci_qc_fill_rtf - fill result taskfile from the received D2H Reg FIS
 *
 * The HBA DMAs the device-to-host register FIS into the port's RX FIS
 * area; decode it into qc->result_tf for libata.
 */
2340 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2342 struct ahci_port_priv *pp = qc->ap->private_data;
2343 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2345 ata_tf_from_fis(d2h_fis, &qc->result_tf);
/*
 * ahci_freeze - mask all interrupts on a port (libata "frozen" state)
 */
2349 static void ahci_freeze(struct ata_port *ap)
2351 void __iomem *port_mmio = ahci_port_base(ap);
/* writing 0 to the per-port mask disables every port interrupt source */
2354 writel(0, port_mmio + PORT_IRQ_MASK);
/*
 * ahci_thaw - re-enable interrupts on a previously frozen port
 *
 * Clears any stale pending events at both the port and host level
 * before restoring the saved per-port interrupt mask, so no spurious
 * IRQ fires the moment interrupts are unmasked.
 */
2357 static void ahci_thaw(struct ata_port *ap)
2359 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2360 void __iomem *port_mmio = ahci_port_base(ap);
2362 struct ahci_port_priv *pp = ap->private_data;
/* ack stale port events, then this port's bit in HOST_IRQ_STAT */
2365 tmp = readl(port_mmio + PORT_IRQ_STAT);
2366 writel(tmp, port_mmio + PORT_IRQ_STAT);
2367 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2369 /* turn IRQ back on */
2370 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
 * ahci_error_handler - libata EH entry point for AHCI ports
 *
 * If the port is not frozen, bounce the DMA engine to restart command
 * processing, then hand off to the PMP-aware generic error handler.
 */
2373 static void ahci_error_handler(struct ata_port *ap)
2375 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2376 /* restart engine */
2377 ahci_stop_engine(ap);
2378 ahci_start_engine(ap);
2381 sata_pmp_error_handler(ap);
/*
 * ahci_post_internal_cmd - cleanup after a libata-internal command
 */
2384 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2386 struct ata_port *ap = qc->ap;
2388 /* make DMA engine forget about the failed command */
2389 if (qc->flags & ATA_QCFLAG_FAILED)
2390 ahci_kick_engine(ap);
/*
 * ahci_pmp_attach - enable port-multiplier support on a port
 *
 * Sets PORT_CMD_PMP so the HBA addresses the PMP, and starts watching
 * for BAD_PMP errors in the port interrupt mask.
 */
2393 static void ahci_pmp_attach(struct ata_port *ap)
2395 void __iomem *port_mmio = ahci_port_base(ap);
2396 struct ahci_port_priv *pp = ap->private_data;
2399 cmd = readl(port_mmio + PORT_CMD);
2400 cmd |= PORT_CMD_PMP;
2401 writel(cmd, port_mmio + PORT_CMD);
2403 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2404 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
 * ahci_pmp_detach - disable port-multiplier support on a port
 *
 * Exact inverse of ahci_pmp_attach(): clear PORT_CMD_PMP and stop
 * listening for BAD_PMP interrupts.
 */
2407 static void ahci_pmp_detach(struct ata_port *ap)
2409 void __iomem *port_mmio = ahci_port_base(ap);
2410 struct ahci_port_priv *pp = ap->private_data;
2413 cmd = readl(port_mmio + PORT_CMD);
2414 cmd &= ~PORT_CMD_PMP;
2415 writel(cmd, port_mmio + PORT_CMD);
2417 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2418 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
 * ahci_port_resume - bring a port back up after suspend/init
 *
 * Restarts the port engine and re-applies the correct PMP mode
 * depending on whether a port multiplier is currently attached.
 */
2421 static int ahci_port_resume(struct ata_port *ap)
2424 ahci_start_port(ap);
2426 if (sata_pmp_attached(ap))
2427 ahci_pmp_attach(ap);
2429 ahci_pmp_detach(ap);
/*
 * ahci_port_suspend - quiesce a port for power management
 *
 * De-initializes the port and powers it down; on de-init failure the
 * reason is logged and the port is restarted so it stays usable.
 */
2435 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2437 const char *emsg = NULL;
2440 rc = ahci_deinit_port(ap, &emsg);
2442 ahci_power_down(ap);
2444 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2445 ahci_start_port(ap);
/*
 * ahci_pci_device_suspend - PCI driver suspend hook
 *
 * Refuses suspend on controllers flagged AHCI_HFLAG_NO_SUSPEND (broken
 * BIOS), disables HBA interrupts before D3 entry as the AHCI spec
 * requires, then delegates to the generic libata PCI suspend path.
 */
2451 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2453 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2454 struct ahci_host_priv *hpriv = host->private_data;
2455 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2458 if (mesg.event & PM_EVENT_SUSPEND &&
2459 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2460 dev_printk(KERN_ERR, &pdev->dev,
2461 "BIOS update required for suspend/resume\n");
2465 if (mesg.event & PM_EVENT_SLEEP) {
2466 /* AHCI spec rev1.1 section 8.3.3:
2467 * Software must disable interrupts prior to requesting a
2468 * transition of the HBA to D3 state.
2470 ctl = readl(mmio + HOST_CTL);
2471 ctl &= ~HOST_IRQ_EN;
2472 writel(ctl, mmio + HOST_CTL);
2473 readl(mmio + HOST_CTL); /* flush */
2476 return ata_pci_device_suspend(pdev, mesg);
/*
 * ahci_pci_device_resume - PCI driver resume hook
 *
 * Restores PCI state, and when coming back from full suspend resets
 * and re-initializes the controller before resuming the ata host.
 */
2479 static int ahci_pci_device_resume(struct pci_dev *pdev)
2481 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2484 rc = ata_pci_device_do_resume(pdev);
/* a full STR suspend loses controller state; re-init from scratch */
2488 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2489 rc = ahci_reset_controller(host);
2493 ahci_init_controller(host);
2496 ata_host_resume(host);
/*
 * ahci_port_start - allocate per-port state and DMA memory
 *
 * Carves one coherent DMA chunk into the three per-port regions the
 * HBA needs (command-slot list, received-FIS area, command table),
 * records their bus addresses, sets the default interrupt mask, and
 * starts the port via ahci_port_resume().  All allocations are
 * devm/dmam-managed, so no explicit teardown is required here.
 */
2502 static int ahci_port_start(struct ata_port *ap)
2504 struct device *dev = ap->host->dev;
2505 struct ahci_port_priv *pp;
2509 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2513 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2517 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2520 * First item in chunk of DMA memory: 32-slot command table,
2521 * 32 bytes each in size
2524 pp->cmd_slot_dma = mem_dma;
2526 mem += AHCI_CMD_SLOT_SZ;
2527 mem_dma += AHCI_CMD_SLOT_SZ;
2530 * Second item: Received-FIS area
2533 pp->rx_fis_dma = mem_dma;
2535 mem += AHCI_RX_FIS_SZ;
2536 mem_dma += AHCI_RX_FIS_SZ;
2539 * Third item: data area for storing a single command
2540 * and its scatter-gather table
2543 pp->cmd_tbl_dma = mem_dma;
2546 * Save off initial list of interrupts to be enabled.
2547 * This could be changed later
2549 pp->intr_mask = DEF_PORT_IRQ;
2551 ap->private_data = pp;
2553 /* engage engines, captain */
2554 return ahci_port_resume(ap);
/*
 * ahci_port_stop - counterpart of ahci_port_start
 *
 * Only de-initializes the hardware; memory was devm/dmam-allocated
 * and is released automatically.
 */
2557 static void ahci_port_stop(struct ata_port *ap)
2559 const char *emsg = NULL;
2562 /* de-initialize port */
2563 rc = ahci_deinit_port(ap, &emsg);
2565 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
/*
 * ahci_configure_dma_masks - set streaming/coherent DMA masks
 * @using_dac: non-zero when the controller advertises 64-bit DMA
 *
 * Tries 64-bit masks first when permitted, falling back to a 32-bit
 * coherent mask (and ultimately full 32-bit operation) on failure.
 */
2568 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2573 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2574 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
/* 64-bit coherent mask failed; retry coherent allocations at 32-bit */
2576 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2578 dev_printk(KERN_ERR, &pdev->dev,
2579 "64-bit DMA enable failed\n");
2584 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2586 dev_printk(KERN_ERR, &pdev->dev,
2587 "32-bit DMA enable failed\n");
2590 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2592 dev_printk(KERN_ERR, &pdev->dev,
2593 "32-bit consistent DMA enable failed\n");
/*
 * ahci_print_info - log controller version, mode and capability flags
 *
 * Decodes HOST_VERSION, the CAP/CAP2 capability words, the implemented
 * port map and the PCI class code into two human-readable info lines.
 */
2600 static void ahci_print_info(struct ata_host *host)
2602 struct ahci_host_priv *hpriv = host->private_data;
2603 struct pci_dev *pdev = to_pci_dev(host->dev);
2604 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2605 u32 vers, cap, cap2, impl, speed;
2606 const char *speed_s;
2610 vers = readl(mmio + HOST_VERSION);
2613 impl = hpriv->port_map;
/* interface speed support field lives in CAP bits 23:20 */
2615 speed = (cap >> 20) & 0xf;
2618 else if (speed == 2)
2620 else if (speed == 3)
/* PCI class code at config offset 0x0a tells IDE/SATA/RAID mode */
2625 pci_read_config_word(pdev, 0x0a, &cc);
2626 if (cc == PCI_CLASS_STORAGE_IDE)
2628 else if (cc == PCI_CLASS_STORAGE_SATA)
2630 else if (cc == PCI_CLASS_STORAGE_RAID)
2635 dev_printk(KERN_INFO, &pdev->dev,
2636 "AHCI %02x%02x.%02x%02x "
2637 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2640 (vers >> 24) & 0xff,
2641 (vers >> 16) & 0xff,
/* number of command slots is CAP bits 12:8, zero-based */
2645 ((cap >> 8) & 0x1f) + 1,
/* second line: one short token per supported capability bit */
2651 dev_printk(KERN_INFO, &pdev->dev,
2658 cap & HOST_CAP_64 ? "64bit " : "",
2659 cap & HOST_CAP_NCQ ? "ncq " : "",
2660 cap & HOST_CAP_SNTF ? "sntf " : "",
2661 cap & HOST_CAP_MPS ? "ilck " : "",
2662 cap & HOST_CAP_SSS ? "stag " : "",
2663 cap & HOST_CAP_ALPM ? "pm " : "",
2664 cap & HOST_CAP_LED ? "led " : "",
2665 cap & HOST_CAP_CLO ? "clo " : "",
2666 cap & HOST_CAP_ONLY ? "only " : "",
2667 cap & HOST_CAP_PMP ? "pmp " : "",
2668 cap & HOST_CAP_FBS ? "fbs " : "",
2669 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2670 cap & HOST_CAP_SSC ? "slum " : "",
2671 cap & HOST_CAP_PART ? "part " : "",
2672 cap & HOST_CAP_CCC ? "ccc " : "",
2673 cap & HOST_CAP_EMS ? "ems " : "",
2674 cap & HOST_CAP_SXS ? "sxs " : "",
2675 cap2 & HOST_CAP2_APST ? "apst " : "",
2676 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2677 cap2 & HOST_CAP2_BOH ? "boh " : ""
2681 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2682 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2683 * support PMP and the 4726 either directly exports the device
2684 * attached to the first downstream port or acts as a hardware storage
2685 * controller and emulates a single ATA device (can be RAID 0/1 or some
2686 * other configuration).
2688 * When there's no device attached to the first downstream port of the
2689 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2690 * configure the 4726. However, ATA emulation of the device is very
2691 * lame. It doesn't send signature D2H Reg FIS after the initial
2692 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2694 * The following function works around the problem by always using
2695 * hardreset on the port and not depending on receiving signature FIS
2696 * afterward. If signature FIS isn't received soon, ATA class is
2697 * assumed without follow-up softreset.
/*
 * ahci_p5wdh_workaround - quirk for the ASUS P5W DH Deluxe board
 *
 * Matches the board via DMI plus the fixed PCI address of the on-board
 * controller (00:1f.2), then forces hardreset-only / assume-ATA ops on
 * port 1, which is hardwired to the SIMG 4726 (see comment above the
 * function in the file).
 */
2699 static void ahci_p5wdh_workaround(struct ata_host *host)
2701 static struct dmi_system_id sysids[] = {
2703 .ident = "P5W DH Deluxe",
2705 DMI_MATCH(DMI_SYS_VENDOR,
2706 "ASUSTEK COMPUTER INC"),
2707 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2712 struct pci_dev *pdev = to_pci_dev(host->dev);
2714 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2715 dmi_check_system(sysids)) {
2716 struct ata_port *ap = host->ports[1];
2718 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2719 "Deluxe on-board SIMG4726 workaround\n");
2721 ap->ops = &ahci_p5wdh_ops;
2722 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2726 /* only some SB600 ahci controllers can do 64bit DMA */
/*
 * ahci_sb600_enable_64bit - decide whether an SB600 may use 64-bit DMA
 *
 * Returns true only for known-good board/BIOS combinations: boards are
 * matched via DMI, and where driver_data holds a yyyymmdd cutoff string
 * the BIOS date must be on or after it.  Any other system keeps the
 * conservative 32-bit-only default.
 */
2727 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2729 static const struct dmi_system_id sysids[] = {
2731 * The oldest version known to be broken is 0901 and
2732 * working is 1501 which was released on 2007-10-26.
2733 * Enable 64bit DMA on 1501 and anything newer.
2735 * Please read bko#9412 for more info.
2738 .ident = "ASUS M2A-VM",
2740 DMI_MATCH(DMI_BOARD_VENDOR,
2741 "ASUSTeK Computer INC."),
2742 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2744 .driver_data = "20071026", /* yyyymmdd */
2747 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2748 * support 64bit DMA.
2750 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2751 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2752 * This spelling mistake was fixed in BIOS version 1.5, so
2753 * 1.5 and later have the Manufacturer as
2754 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2755 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2757 * BIOS versions earlier than 1.9 had a Board Product Name
2758 * DMI field of "MS-7376". This was changed to be
2759 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2760 * match on DMI_BOARD_NAME of "MS-7376".
2763 .ident = "MSI K9A2 Platinum",
2765 DMI_MATCH(DMI_BOARD_VENDOR,
2766 "MICRO-STAR INTER"),
2767 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2772 const struct dmi_system_id *match;
2773 int year, month, date;
/* the quirk only concerns the on-board SB600 at address 00:12.0 */
2776 match = dmi_first_match(sysids);
2777 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
/* no date cutoff recorded: board is good with any BIOS */
2781 if (!match->driver_data)
/* compare BIOS date lexically against the yyyymmdd cutoff string */
2784 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2785 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2787 if (strcmp(buf, match->driver_data) >= 0)
2790 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
2791 "forcing 32bit DMA, update BIOS\n", match->ident);
2796 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
/*
 * ahci_broken_system_poweroff - detect systems that hang if drives
 * are spun down on poweroff
 *
 * DMI-matches known-bad HP laptops; driver_data carries the PCI slot
 * of the on-board controller so the quirk hits only that device.
 */
2801 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2803 static const struct dmi_system_id broken_systems[] = {
2805 .ident = "HP Compaq nx6310",
2807 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2808 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2810 /* PCI slot number of the controller */
2811 .driver_data = (void *)0x1FUL,
2814 .ident = "HP Compaq 6720s",
2816 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2817 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2819 /* PCI slot number of the controller */
2820 .driver_data = (void *)0x1FUL,
2823 { } /* terminate list */
2825 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2828 unsigned long slot = (unsigned long)dmi->driver_data;
2829 /* apply the quirk only to on-board controllers */
2830 return slot == PCI_SLOT(pdev->devfn);
/*
 * ahci_broken_suspend - detect machines whose BIOS breaks resume-from-STR
 *
 * DMI-matches affected HP and eMachines systems; driver_data holds the
 * yyyymmdd date of the first fixed BIOS, and the current BIOS date is
 * compared lexically against it.  Only the on-board controller at
 * 00:1f.2 is affected.
 */
2836 static bool ahci_broken_suspend(struct pci_dev *pdev)
2838 static const struct dmi_system_id sysids[] = {
2840 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
2841 * to the harddisk doesn't become online after
2842 * resuming from STR. Warn and fail suspend.
2844 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
2846 * Use dates instead of versions to match as HP is
2847 * apparently recycling both product and version
2850 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
2855 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2856 DMI_MATCH(DMI_PRODUCT_NAME,
2857 "HP Pavilion dv4 Notebook PC"),
2859 .driver_data = "20090105", /* F.30 */
2864 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2865 DMI_MATCH(DMI_PRODUCT_NAME,
2866 "HP Pavilion dv5 Notebook PC"),
2868 .driver_data = "20090506", /* F.16 */
2873 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2874 DMI_MATCH(DMI_PRODUCT_NAME,
2875 "HP Pavilion dv6 Notebook PC"),
2877 .driver_data = "20090423", /* F.21 */
2882 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2883 DMI_MATCH(DMI_PRODUCT_NAME,
2884 "HP HDX18 Notebook PC"),
2886 .driver_data = "20090430", /* F.23 */
2889 * Acer eMachines G725 has the same problem. BIOS
2890 * V1.03 is known to be broken. V3.04 is known to
2891 * work. In between, there are V1.06, V2.06 and V3.03
2892 * that we don't have much idea about. For now,
2893 * blacklist anything older than V3.04.
2895 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
2900 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
2901 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
2903 .driver_data = "20091216", /* V3.04 */
2905 { } /* terminate list */
2907 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2908 int year, month, date;
2911 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
/* broken iff the BIOS date precedes the recorded fix date */
2914 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2915 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
2917 return strcmp(buf, dmi->driver_data) < 0;
/*
 * ahci_broken_online - detect boards whose SIMG5723 keeps links
 * spuriously "online"
 *
 * driver_data encodes the affected controller's bus/devfn; the quirk
 * applies only when this pdev matches that exact PCI address.
 */
2920 static bool ahci_broken_online(struct pci_dev *pdev)
2922 #define ENCODE_BUSDEVFN(bus, slot, func) \
2923 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
2924 static const struct dmi_system_id sysids[] = {
2926 * There are several gigabyte boards which use
2927 * SIMG5723s configured as hardware RAID. Certain
2928 * 5723 firmware revisions shipped there keep the link
2929 * online but fail to answer properly to SRST or
2930 * IDENTIFY when no device is attached downstream
2931 * causing libata to retry quite a few times leading
2932 * to excessive detection delay.
2934 * As these firmwares respond to the second reset try
2935 * with invalid device signature, considering unknown
2936 * sig as offline works around the problem acceptably.
2939 .ident = "EP45-DQ6",
2941 DMI_MATCH(DMI_BOARD_VENDOR,
2942 "Gigabyte Technology Co., Ltd."),
2943 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
2945 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
2948 .ident = "EP45-DS5",
2950 DMI_MATCH(DMI_BOARD_VENDOR,
2951 "Gigabyte Technology Co., Ltd."),
2952 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
2954 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
2956 { } /* terminate list */
2958 #undef ENCODE_BUSDEVFN
2959 const struct dmi_system_id *dmi = dmi_first_match(sysids);
/* high byte is the bus number, low byte the devfn */
2965 val = (unsigned long)dmi->driver_data;
2967 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
#ifdef CONFIG_ATA_ACPI
/*
 * ahci_gtf_filter_workaround - suppress harmful ACPI _GTF commands
 *
 * On DMI-matched systems, ORs the per-board filter mask from
 * driver_data into gtf_filter of every device on every port, so
 * libata-acpi drops the offending taskfiles.
 */
2971 static void ahci_gtf_filter_workaround(struct ata_host *host)
2973 static const struct dmi_system_id sysids[] = {
2975 * Aspire 3810T issues a bunch of SATA enable commands
2976 * via _GTF including an invalid one and one which is
2977 * rejected by the device. Among the successful ones
2978 * is FPDMA non-zero offset enable which when enabled
2979 * only on the drive side leads to NCQ command
2980 * failures. Filter it out.
2983 .ident = "Aspire 3810T",
2985 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
2986 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
2988 .driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
2992 const struct dmi_system_id *dmi = dmi_first_match(sysids);
2993 unsigned int filter;
2999 filter = (unsigned long)dmi->driver_data;
3000 dev_printk(KERN_INFO, host->dev,
3001 "applying extra ACPI _GTF filter 0x%x for %s\n",
3002 filter, dmi->ident);
3004 for (i = 0; i < host->n_ports; i++) {
3005 struct ata_port *ap = host->ports[i];
3006 struct ata_link *link;
3007 struct ata_device *dev;
3009 ata_for_each_link(link, ap, EDGE)
3010 ata_for_each_dev(dev, link, ALL)
3011 dev->gtf_filter |= filter;
/* no-op stub when ACPI support is compiled out */
3015 static inline void ahci_gtf_filter_workaround(struct ata_host *host)
/*
 * ahci_init_one - PCI probe entry point
 *
 * Full bring-up sequence for one AHCI controller: bail out on devices
 * better served by other drivers, claim PCI resources, build the
 * ahci_host_priv with per-board quirk flags, read the initial config,
 * translate capabilities into port flags (NCQ, PMP, EM), apply DMI
 * quirks, allocate the ata_host and ports, configure DMA masks, reset
 * and initialize the controller, and finally register the IRQ handler
 * via ata_host_activate().
 */
3019 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3021 static int printed_version;
3022 unsigned int board_id = ent->driver_data;
3023 struct ata_port_info pi = ahci_port_info[board_id];
3024 const struct ata_port_info *ppi[] = { &pi, NULL };
3025 struct device *dev = &pdev->dev;
3026 struct ahci_host_priv *hpriv;
3027 struct ata_host *host;
3032 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3034 if (!printed_version++)
3035 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3037 /* The AHCI driver can only drive the SATA ports, the PATA driver
3038 can drive them all so if both drivers are selected make sure
3039 AHCI stays out of the way */
3040 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3044 * For some reason, MCP89 on MacBook 7,1 doesn't work with
3045 * ahci, use ata_generic instead.
3047 if (pdev->vendor == PCI_VENDOR_ID_NVIDIA &&
3048 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA &&
3049 pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
3050 pdev->subsystem_device == 0xcb89)
3053 /* acquire resources */
3054 rc = pcim_enable_device(pdev);
3058 /* AHCI controllers often implement SFF compatible interface.
3059 * Grab all PCI BARs just in case.
3061 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3063 pcim_pin_device(pdev);
3067 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3068 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3071 /* ICH6s share the same PCI ID for both piix and ahci
3072 * modes. Enabling ahci mode while MAP indicates
3073 * combined mode is a bad idea. Yield to ata_piix.
3075 pci_read_config_byte(pdev, ICH_MAP, &map);
3077 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3078 "combined mode, can't enable AHCI mode\n");
3083 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
/* seed host flags with the board-specific quirks from the PCI table */
3086 hpriv->flags |= (unsigned long)pi.private_data;
3088 /* MCP65 revision A1 and A2 can't do MSI */
3089 if (board_id == board_ahci_mcp65 &&
3090 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3091 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3093 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3094 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3095 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3097 /* only some SB600s can do 64bit DMA */
3098 if (ahci_sb600_enable_64bit(pdev))
3099 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3101 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3104 /* save initial config */
3105 ahci_save_initial_config(pdev, hpriv);
3108 if (hpriv->cap & HOST_CAP_NCQ) {
3109 pi.flags |= ATA_FLAG_NCQ;
3110 /* Auto-activate optimization is supposed to be supported on
3111 all AHCI controllers indicating NCQ support, but it seems
3112 to be broken at least on some NVIDIA MCP79 chipsets.
3113 Until we get info on which NVIDIA chipsets don't have this
3114 issue, if any, disable AA on all NVIDIA AHCIs. */
3115 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
3116 pi.flags |= ATA_FLAG_FPDMA_AA;
3119 if (hpriv->cap & HOST_CAP_PMP)
3120 pi.flags |= ATA_FLAG_PMP;
/* enclosure management: probe the EM location/control registers */
3122 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3124 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3125 u32 em_loc = readl(mmio + HOST_EM_LOC);
3126 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3128 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3130 /* we only support LED message type right now */
3131 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3133 hpriv->em_loc = ((em_loc >> 16) * 4);
3134 pi.flags |= ATA_FLAG_EM;
3135 if (!(em_ctl & EM_CTL_ALHD))
3136 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3140 if (ahci_broken_system_poweroff(pdev)) {
3141 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3142 dev_info(&pdev->dev,
3143 "quirky BIOS, skipping spindown on poweroff\n");
3146 if (ahci_broken_suspend(pdev)) {
3147 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3148 dev_printk(KERN_WARNING, &pdev->dev,
3149 "BIOS update required for suspend/resume\n");
3152 if (ahci_broken_online(pdev)) {
3153 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3154 dev_info(&pdev->dev,
3155 "online status unreliable, applying workaround\n");
3158 /* CAP.NP sometimes indicate the index of the last enabled
3159 * port, at other times, that of the last possible port, so
3160 * determining the maximum port number requires looking at
3161 * both CAP.NP and port_map.
3163 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3165 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3168 host->iomap = pcim_iomap_table(pdev);
3169 host->private_data = hpriv;
/* staggered spin-up forces sequential scan unless overridden */
3171 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3172 host->flags |= ATA_HOST_PARALLEL_SCAN;
3174 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3176 if (pi.flags & ATA_FLAG_EM)
3177 ahci_reset_em(host);
3179 for (i = 0; i < host->n_ports; i++) {
3180 struct ata_port *ap = host->ports[i];
3182 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3183 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3184 0x100 + ap->port_no * 0x80, "port");
3186 /* set initial link pm policy */
3187 ap->pm_policy = NOT_AVAILABLE;
3189 /* set enclosure management message type */
3190 if (ap->flags & ATA_FLAG_EM)
3191 ap->em_message_type = ahci_em_messages;
3194 /* disabled/not-implemented port */
3195 if (!(hpriv->port_map & (1 << i)))
3196 ap->ops = &ata_dummy_port_ops;
3199 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3200 ahci_p5wdh_workaround(host);
3202 /* apply gtf filter quirk */
3203 ahci_gtf_filter_workaround(host);
3205 /* initialize adapter */
3206 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3210 rc = ahci_reset_controller(host);
3214 ahci_init_controller(host);
3215 ahci_print_info(host);
3217 pci_set_master(pdev);
3218 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
/* module init: register the PCI driver */
3222 static int __init ahci_init(void)
3224 return pci_register_driver(&ahci_pci_driver);
/* module exit: unregister the PCI driver */
3227 static void __exit ahci_exit(void)
3229 pci_unregister_driver(&ahci_pci_driver);
/* module metadata and entry points */
3233 MODULE_AUTHOR("Jeff Garzik");
3234 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3235 MODULE_LICENSE("GPL");
3236 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3237 MODULE_VERSION(DRV_VERSION);
3239 module_init(ahci_init);
3240 module_exit(ahci_exit);