blob: c01d6bf4c593b3e54612940ea7188c77cbfe7877 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
24
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
66
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
Brett Russ20f733e72005-09-01 18:26:17 -040087
/*
 * Register offsets and bit definitions for the chip-level (PCI/PCIe),
 * SATA host-controller (SATAHC), and per-port register blocks.
 * All values are fixed by the Marvell hardware; do not reorder or change.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,	/* hardware command queue depth */
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (command request block) field shifts/flags */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* CRPB (command response block) field shifts */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Main (chip-wide) interrupt cause/mask; PCI parts vs SoC differ */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				  /* temporary, until we fix hotplug: */
				  (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	/* Error conditions that freeze the port (Gen II/IIE) */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	/* Error conditions that freeze the port (Gen I, "_5" = 50xx) */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
370
/* Controller-generation tests on mv_host_priv::hp_flags */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* True unless port 0 carries MV_FLAG_SOC (SoC parts have no PCI interface) */
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

/* Mbus address-decode window registers: one ctrl/base pair every 16 bytes */
#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
378
enum {
	/* The s/g splitting we do on /length/ in mv_fill_sg()
	 * requires a DMA boundary of 0xffff.
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* Mask of the register bits holding the low 32 bits of the
	 * EDMA request queue's DMA base address.
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* Same thing, for the response queue. */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
393
/*
 * Supported chip families.  The values index mv_port_info[]
 * (note each mv_port_info[] entry is tagged with its chip_type),
 * so the ordering here must match that table.
 */
enum chip_type {
	chip_504x	= 0,
	chip_508x	= 1,
	chip_5080	= 2,
	chip_604x	= 3,
	chip_608x	= 4,
	chip_6042	= 5,
	chip_7042	= 6,
	chip_soc	= 7,	/* SoC-integrated controller */
};
404
/* Command ReQuest Block: 32B
 * Hardware-defined layout; fields are little-endian as seen by the chip.
 */
struct mv_crqb {
	__le32			sg_addr;	/* low 32 bits of ePRD table addr */
	__le32			sg_addr_hi;	/* high 32 bits of ePRD table addr */
	__le16			ctrl_flags;	/* CRQB_FLAG_*/CRQB_*_SHIFT fields */
	__le16			ata_cmd[11];	/* ATA taskfile, register/value pairs */
};
412
/* Command ReQuest Block, Gen-IIE variant: 32B
 * (used by mv_qc_prep_iie(); hardware-defined, little-endian layout)
 */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits of ePRD table addr */
	__le32			addr_hi;	/* high 32 bits of ePRD table addr */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* packed ATA taskfile */
};
420
/* Command ResPonse Block: 8B
 * Written by the hardware on command completion (little-endian).
 */
struct mv_crpb {
	__le16			id;	/* tag/IO id (see CRPB_IOID_SHIFT_*) */
	__le16			flags;	/* status lives at CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;
};
427
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG
 * One 16B hardware scatter/gather entry (little-endian).
 */
struct mv_sg {
	__le32			addr;		/* low 32 bits of segment addr */
	__le32			flags_size;	/* length + EPRD_FLAG_END_OF_TBL */
	__le32			addr_hi;	/* high 32 bits of segment addr */
	__le32			reserved;
};
435
/* Per-port private state: DMA rings and their bus addresses. */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* command request ring */
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;		/* command response ring */
	dma_addr_t		crpb_dma;
	/* one SG table per queue tag */
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* request queue index */
	unsigned int		resp_idx;	/* response queue index */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
449
/* Per-port PHY signal parameters (amplitude / pre-emphasis),
 * captured by the hw_ops read_preamp() hook.
 */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
454
/* Host-wide private state shared by all ports of one controller. */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags */
	struct mv_port_signal	signal[8];	/* one per possible port */
	const struct mv_hw_ops	*ops;		/* chip-generation callbacks */
	int			n_ports;
	void __iomem		*base;
	/* main IRQ cause/mask registers; location differs for PCI vs SoC
	 * (HC_MAIN_IRQ_*_OFS vs HC_SOC_MAIN_IRQ_*_OFS)
	 */
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;	/* PCI vs PCIe IRQ reg offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
475
/* Per-chip-generation low-level hardware hooks (mv5/mv6/soc variants). */
struct mv_hw_ops {
	/* apply PHY errata workarounds for one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* capture PHY preamp/signal settings into hpriv->signal[idx] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	/* reset host controller(s); returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
487
/* Forward declarations: libata callbacks and chip-specific helpers,
 * all defined later in this file.
 */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen-I (50xx) hw_ops implementations */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen-II/IIE (60xx/6042/7042) and SoC hw_ops implementations */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

/* Port-multiplier support */
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
/* SCSI host template for Gen-I (50xx) chips: no NCQ */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
552
/* SCSI host template for Gen-II/IIE chips: NCQ-capable */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,	/* one less than HW queue depth */
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
559
/* Port operations for Gen-I (50xx) chips; base for the mv6/mv_iie ops. */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
578
/* Port operations for Gen-II (60xx) chips: adds PMP support on top of mv5. */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config             = mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};
591
/* Port operations for Gen-IIE (6042/7042) chips: IIE-format CRQBs. */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
598
/* Per-board attributes, indexed by the chip_* board id used in mv_pci_tbl[]. */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
659
/*
 * PCI device table: each entry's driver_data is a chip_* board id,
 * which selects an entry in mv_port_info[] above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
689
/* Low-level hardware ops for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
698
/* Low-level hardware ops for Gen-II/IIe (60xx/70xx) PCI chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
707
/* Low-level hardware ops for system-on-chip variants (shares Gen-II PHY errata). */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
716
Brett Russ20f733e72005-09-01 18:26:17 -0400717/*
718 * Functions
719 */
720
/*
 * writelfl - write a register and flush it
 * @data: value to write
 * @addr: register address
 *
 * The read-back forces the preceding write out of any posted-write
 * buffers before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
726
Jeff Garzikc9d39132005-11-13 17:47:51 -0500727static inline unsigned int mv_hc_from_port(unsigned int port)
728{
729 return port >> MV_PORT_HC_SHIFT;
730}
731
732static inline unsigned int mv_hardport_from_port(unsigned int port)
733{
734 return port & MV_PORT_MASK;
735}
736
Mark Lord352fab72008-04-19 14:43:42 -0400737static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
738{
739 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
740}
741
Jeff Garzikc9d39132005-11-13 17:47:51 -0500742static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
743 unsigned int port)
744{
745 return mv_hc_base(base, mv_hc_from_port(port));
746}
747
Brett Russ20f733e72005-09-01 18:26:17 -0400748static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
749{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500750 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500751 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500752 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e72005-09-01 18:26:17 -0400753}
754
Mark Lorde12bef52008-03-31 19:33:56 -0400755static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
756{
757 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
758 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
759
760 return hc_mmio + ofs;
761}
762
/* Return the chip mmio base recorded in the host's private data. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}
768
Brett Russ20f733e72005-09-01 18:26:17 -0400769static inline void __iomem *mv_ap_base(struct ata_port *ap)
770{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500771 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e72005-09-01 18:26:17 -0400772}
773
Jeff Garzikcca39742006-08-24 03:19:22 -0400774static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e72005-09-01 18:26:17 -0400775{
Jeff Garzikcca39742006-08-24 03:19:22 -0400776 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e72005-09-01 18:26:17 -0400777}
778
/*
 * mv_set_edma_ptrs - program the eDMA request/response queue registers
 * @port_mmio: per-port register base
 * @hpriv: host private data (for errata flags)
 * @pp: port private data holding ring DMA addresses and indices
 *
 * Points the hardware at the CRQB/CRPB rings and synchronizes the
 * in/out pointer registers with the driver's cached indices.  On
 * chips with the XX42A0 erratum the "unused" pointer register must
 * carry the full ring base address rather than just the index.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* ring base must be 1KB aligned (low 10 bits are index/flags) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response ring must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
818
Brett Russ05b308e2005-10-05 17:08:53 -0400819/**
820 * mv_start_dma - Enable eDMA engine
821 * @base: port base address
822 * @pp: port private data
823 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900824 * Verify the local cache of the eDMA state is accurate with a
825 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400826 *
827 * LOCKING:
828 * Inherited from caller.
829 */
Mark Lord0c589122008-01-26 18:31:16 -0500830static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500831 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400832{
Mark Lord72109162008-01-26 18:31:33 -0500833 int want_ncq = (protocol == ATA_PROT_NCQ);
834
835 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
836 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
837 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -0400838 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -0500839 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400840 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500841 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lord352fab72008-04-19 14:43:42 -0400842 int hardport = mv_hardport_from_port(ap->port_no);
Mark Lord0c589122008-01-26 18:31:16 -0500843 void __iomem *hc_mmio = mv_hc_base_from_port(
Mark Lord352fab72008-04-19 14:43:42 -0400844 mv_host_base(ap->host), hardport);
Mark Lord0c589122008-01-26 18:31:16 -0500845 u32 hc_irq_cause, ipending;
846
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400847 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500848 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400849
Mark Lord0c589122008-01-26 18:31:16 -0500850 /* clear EDMA interrupt indicator, if any */
851 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Mark Lord352fab72008-04-19 14:43:42 -0400852 ipending = (DEV_IRQ | DMA_IRQ) << hardport;
Mark Lord0c589122008-01-26 18:31:16 -0500853 if (hc_irq_cause & ipending) {
854 writelfl(hc_irq_cause & ~ipending,
855 hc_mmio + HC_IRQ_CAUSE_OFS);
856 }
857
Mark Lorde12bef52008-03-31 19:33:56 -0400858 mv_edma_cfg(ap, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500859
860 /* clear FIS IRQ Cause */
861 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
862
Mark Lordf630d562008-01-26 18:31:00 -0500863 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400864
Mark Lordf630d562008-01-26 18:31:00 -0500865 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400866 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
867 }
Brett Russ31961942005-09-30 01:36:00 -0400868}
869
Brett Russ05b308e2005-10-05 17:08:53 -0400870/**
Mark Lorde12bef52008-03-31 19:33:56 -0400871 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400872 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400873 *
874 * LOCKING:
875 * Inherited from caller.
876 */
Mark Lordb5624682008-03-31 19:34:40 -0400877static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400878{
Mark Lordb5624682008-03-31 19:34:40 -0400879 int i;
Brett Russ31961942005-09-30 01:36:00 -0400880
Mark Lordb5624682008-03-31 19:34:40 -0400881 /* Disable eDMA. The disable bit auto clears. */
882 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500883
Mark Lordb5624682008-03-31 19:34:40 -0400884 /* Wait for the chip to confirm eDMA is off. */
885 for (i = 10000; i > 0; i--) {
886 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb52007-07-12 14:30:19 -0400887 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400888 return 0;
889 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400890 }
Mark Lordb5624682008-03-31 19:34:40 -0400891 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400892}
893
Mark Lorde12bef52008-03-31 19:33:56 -0400894static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400895{
Mark Lordb5624682008-03-31 19:34:40 -0400896 void __iomem *port_mmio = mv_ap_base(ap);
897 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400898
Mark Lordb5624682008-03-31 19:34:40 -0400899 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
900 return 0;
901 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
902 if (mv_stop_edma_engine(port_mmio)) {
903 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
904 return -EIO;
905 }
906 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400907}
908
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of mmio space, four words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	unsigned off = 0;

	while (off < bytes) {
		int w;

		DPRINTK("%p: ", start + off);
		for (w = 0; off < bytes && w < 4; w++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
923
/* Debug helper: hex-dump @bytes of PCI config space, four dwords per line. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	unsigned off = 0;

	while (off < bytes) {
		int w;

		DPRINTK("%02x: ", off);
		for (w = 0; off < bytes && w < 4; w++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, chip-global registers, and the
 * HC/EDMA/SATA register blocks for one port (@port >= 0) or for all
 * ports (@port < 0).  @pdev may be NULL for non-PCI (SoC) hosts.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		/* dump just the HC and port that @port lives on */
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
984
Brett Russ20f733e72005-09-01 18:26:17 -0400985static unsigned int mv_scr_offset(unsigned int sc_reg_in)
986{
987 unsigned int ofs;
988
989 switch (sc_reg_in) {
990 case SCR_STATUS:
991 case SCR_CONTROL:
992 case SCR_ERROR:
993 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
994 break;
995 case SCR_ACTIVE:
996 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
997 break;
998 default:
999 ofs = 0xffffffffU;
1000 break;
1001 }
1002 return ofs;
1003}
1004
Tejun Heoda3dbb12007-07-16 14:29:40 +09001005static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e72005-09-01 18:26:17 -04001006{
1007 unsigned int ofs = mv_scr_offset(sc_reg_in);
1008
Tejun Heoda3dbb12007-07-16 14:29:40 +09001009 if (ofs != 0xffffffffU) {
1010 *val = readl(mv_ap_base(ap) + ofs);
1011 return 0;
1012 } else
1013 return -EINVAL;
Brett Russ20f733e72005-09-01 18:26:17 -04001014}
1015
Tejun Heoda3dbb12007-07-16 14:29:40 +09001016static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e72005-09-01 18:26:17 -04001017{
1018 unsigned int ofs = mv_scr_offset(sc_reg_in);
1019
Tejun Heoda3dbb12007-07-16 14:29:40 +09001020 if (ofs != 0xffffffffU) {
Brett Russ20f733e72005-09-01 18:26:17 -04001021 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001022 return 0;
1023 } else
1024 return -EINVAL;
Brett Russ20f733e72005-09-01 18:26:17 -04001025}
1026
/*
 * mv6_dev_config - per-device configuration quirks for Gen-II chips
 * @adev: device being configured
 */
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			/* behind a PMP: must fall back to non-NCQ */
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			/* cap transfer size: no hob_nsect with NCQ on Gen-II */
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}
1051
Mark Lorde49856d2008-04-16 14:59:07 -04001052static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1053{
1054 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1055 /*
1056 * Various bit settings required for operation
1057 * in FIS-based switching (fbs) mode on GenIIe:
1058 */
1059 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1060 old_ltmode = readl(port_mmio + LTMODE_OFS);
1061 if (enable_fbs) {
1062 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1063 new_ltmode = old_ltmode | LTMODE_BIT8;
1064 } else { /* disable fbs */
1065 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1066 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1067 }
1068 if (new_fcfg != old_fcfg)
1069 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1070 if (new_ltmode != old_ltmode)
1071 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
Mark Lord0c589122008-01-26 18:31:16 -05001072}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001073
/*
 * mv_edma_cfg - program the eDMA configuration register for a port
 * @ap: port to configure
 * @want_ncq: nonzero to configure for NCQ operation
 *
 * Builds the per-chip-generation EDMA_CFG value, enables FIS-based
 * switching on GenIIe when NCQ is used behind a port multiplier, and
 * keeps the cached MV_PP_FLAG_NCQ_EN flag in sync with the hardware.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1112
Mark Lordda2fa9b2008-01-26 18:32:45 -05001113static void mv_port_free_dma_mem(struct ata_port *ap)
1114{
1115 struct mv_host_priv *hpriv = ap->host->private_data;
1116 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001117 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001118
1119 if (pp->crqb) {
1120 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1121 pp->crqb = NULL;
1122 }
1123 if (pp->crpb) {
1124 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1125 pp->crpb = NULL;
1126 }
Mark Lordeb73d552008-01-29 13:24:00 -05001127 /*
1128 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1129 * For later hardware, we have one unique sg_tbl per NCQ tag.
1130 */
1131 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1132 if (pp->sg_tbl[tag]) {
1133 if (tag == 0 || !IS_GEN_I(hpriv))
1134 dma_pool_free(hpriv->sg_tbl_pool,
1135 pp->sg_tbl[tag],
1136 pp->sg_tbl_dma[tag]);
1137 pp->sg_tbl[tag] = NULL;
1138 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001139 }
1140}
1141
Brett Russ05b308e2005-10-05 17:08:53 -04001142/**
1143 * mv_port_start - Port specific init/start routine.
1144 * @ap: ATA channel to manipulate
1145 *
1146 * Allocate and point to DMA memory, init port private memory,
1147 * zero indices.
1148 *
1149 * LOCKING:
1150 * Inherited from caller.
1151 */
Brett Russ31961942005-09-30 01:36:00 -04001152static int mv_port_start(struct ata_port *ap)
1153{
Jeff Garzikcca39742006-08-24 03:19:22 -04001154 struct device *dev = ap->host->dev;
1155 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001156 struct mv_port_priv *pp;
James Bottomleydde20202008-02-19 11:36:56 +01001157 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001158
Tejun Heo24dc5f32007-01-20 16:00:28 +09001159 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001160 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001161 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001162 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001163
Mark Lordda2fa9b2008-01-26 18:32:45 -05001164 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1165 if (!pp->crqb)
1166 return -ENOMEM;
1167 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001168
Mark Lordda2fa9b2008-01-26 18:32:45 -05001169 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1170 if (!pp->crpb)
1171 goto out_port_free_dma_mem;
1172 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001173
Mark Lordeb73d552008-01-29 13:24:00 -05001174 /*
1175 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1176 * For later hardware, we need one unique sg_tbl per NCQ tag.
1177 */
1178 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1179 if (tag == 0 || !IS_GEN_I(hpriv)) {
1180 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1181 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1182 if (!pp->sg_tbl[tag])
1183 goto out_port_free_dma_mem;
1184 } else {
1185 pp->sg_tbl[tag] = pp->sg_tbl[0];
1186 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1187 }
1188 }
Brett Russ31961942005-09-30 01:36:00 -04001189 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001190
1191out_port_free_dma_mem:
1192 mv_port_free_dma_mem(ap);
1193 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001194}
1195
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* quiesce the eDMA engine before freeing the rings it points at */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1210
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each command tag owns its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		/*
		 * An ePRD length field is only 16 bits, so split any
		 * segment that would cross a 64KB boundary into multiple
		 * entries of at most 0x10000 bytes each.
		 */
		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* mark the final entry so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1254
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001255static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001256{
Mark Lord559eeda2006-05-19 16:40:15 -04001257 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001258 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001259 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001260}
1261
Brett Russ05b308e2005-10-05 17:08:53 -04001262/**
1263 * mv_qc_prep - Host specific command preparation.
1264 * @qc: queued command to prepare
1265 *
1266 * This routine simply redirects to the general purpose routine
1267 * if command is not DMA. Else, it handles prep of the CRQB
1268 * (command request block), does some sanity checking, and calls
1269 * the SG load routine.
1270 *
1271 * LOCKING:
1272 * Inherited from caller.
1273 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* Only DMA/NCQ commands go through the EDMA request queue;
	 * everything else is issued via the shadow registers instead.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* route the command to the right port-multiplier target */
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* per-tag SG table: low/high 32 bits of its DMA address */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		/* NCQ: hob_feature first, then feature (sector counts) */
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* Remaining taskfile registers, hob before non-hob; command
	 * register goes last with the "last entry" flag set.
	 */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1352
1353/**
1354 * mv_qc_prep_iie - Host specific command preparation.
1355 * @qc: queued command to prepare
1356 *
1357 * This routine simply redirects to the general purpose routine
1358 * if command is not DMA. Else, it handles prep of the CRQB
1359 * (command request block), does some sanity checking, and calls
1360 * the SG load routine.
1361 *
1362 * LOCKING:
1363 * Inherited from caller.
1364 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA/NCQ commands go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen IIE carries the tag in a second (host-queue) field too */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE CRQB layout differs; overlay it on the same queue slot */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Unlike Gen I/II, the IIE CRQB packs the whole taskfile into
	 * four fixed little-endian words rather than reg/value pairs.
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1421
Brett Russ05b308e2005-10-05 17:08:53 -04001422/**
1423 * mv_qc_issue - Initiate a command to the host
1424 * @qc: queued command to start
1425 *
1426 * This routine simply redirects to the general purpose routine
1427 * if command is not DMA. Else, it sanity checks our local
1428 * caches of the request producer/consumer indices then enables
1429 * DMA and bumps the request producer index.
1430 *
1431 * LOCKING:
1432 * Inherited from caller.
1433 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	/* (re)start EDMA in the mode required by this protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* one more request now occupied in the software queue */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1465
Brett Russ05b308e2005-10-05 17:08:53 -04001466/**
Brett Russ05b308e2005-10-05 17:08:53 -04001467 * mv_err_intr - Handle error interrupts on the port
1468 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001469 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001470 *
1471 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001472 * some cases require an eDMA reset, which also performs a COMRESET.
1473 * The SERR case requires a clear of pending errors in the SATA
1474 * SERROR register. Finally, if the port disabled DMA,
1475 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001476 *
1477 * LOCKING:
1478 * Inherited from caller.
1479 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001480static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e72005-09-01 18:26:17 -04001481{
Brett Russ31961942005-09-30 01:36:00 -04001482 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001483 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1484 struct mv_port_priv *pp = ap->private_data;
1485 struct mv_host_priv *hpriv = ap->host->private_data;
1486 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1487 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001488 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e72005-09-01 18:26:17 -04001489
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001490 ata_ehi_clear_desc(ehi);
Brett Russ20f733e72005-09-01 18:26:17 -04001491
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001492 if (!edma_enabled) {
1493 /* just a guess: do we need to do this? should we
1494 * expand this, and do it in all cases?
1495 */
Tejun Heo936fd732007-08-06 18:36:23 +09001496 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1497 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e72005-09-01 18:26:17 -04001498 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001499
1500 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1501
Mark Lord352fab72008-04-19 14:43:42 -04001502 ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001503
1504 /*
Mark Lord352fab72008-04-19 14:43:42 -04001505 * All generations share these EDMA error cause bits:
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001506 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001507 if (edma_err_cause & EDMA_ERR_DEV)
1508 err_mask |= AC_ERR_DEV;
1509 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001510 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001511 EDMA_ERR_INTRL_PAR)) {
1512 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001513 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001514 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001515 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001516 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1517 ata_ehi_hotplugged(ehi);
1518 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001519 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001520 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001521 }
1522
Mark Lord352fab72008-04-19 14:43:42 -04001523 /*
1524 * Gen-I has a different SELF_DIS bit,
1525 * different FREEZE bits, and no SERR bit:
1526 */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001527 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001528 eh_freeze_mask = EDMA_EH_FREEZE_5;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001529 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001531 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001532 }
1533 } else {
1534 eh_freeze_mask = EDMA_EH_FREEZE;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001535 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001536 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001537 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001538 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001539 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001540 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1541 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001542 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001543 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001544 }
1545 }
Brett Russ20f733e72005-09-01 18:26:17 -04001546
1547 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001548 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04001549
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001550 if (!err_mask) {
1551 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001552 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001553 }
1554
1555 ehi->serror |= serr;
1556 ehi->action |= action;
1557
1558 if (qc)
1559 qc->err_mask |= err_mask;
1560 else
1561 ehi->err_mask |= err_mask;
1562
1563 if (edma_err_cause & eh_freeze_mask)
1564 ata_port_freeze(ap);
1565 else
1566 ata_port_abort(ap);
1567}
1568
1569static void mv_intr_pio(struct ata_port *ap)
1570{
1571 struct ata_queued_cmd *qc;
1572 u8 ata_status;
1573
1574 /* ignore spurious intr if drive still BUSY */
1575 ata_status = readb(ap->ioaddr.status_addr);
1576 if (unlikely(ata_status & ATA_BUSY))
1577 return;
1578
1579 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001580 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001581 if (unlikely(!qc)) /* no active tag */
1582 return;
1583 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1584 return;
1585
1586 /* and finally, complete the ATA command */
1587 qc->err_mask |= ac_err_mask(ata_status);
1588 ata_qc_complete(qc);
1589}
1590
/*
 * Drain the EDMA response queue: complete every finished command
 * between the software out-pointer and the hardware in-pointer,
 * then tell the hardware how far we got.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: hand off to the error handler and stop
			 * draining; the queue pointer is NOT advanced here.
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1657
Brett Russ05b308e2005-10-05 17:08:53 -04001658/**
1659 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001660 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001661 * @relevant: port error bits relevant to this host controller
1662 * @hc: which host controller we're to look at
1663 *
1664 * Read then write clear the HC interrupt status then walk each
1665 * port connected to the HC and see if it needs servicing. Port
1666 * success ints are reported in the HC interrupt status reg, the
1667 * port error ints are reported in the higher level main
1668 * interrupt status register and thus are passed in via the
1669 * 'relevant' argument.
1670 *
1671 * LOCKING:
1672 * Inherited from caller.
1673 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* HC 0 serves ports 0..3, HC 1 serves ports 4..7 */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may expose fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack (clear) exactly the cause bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hardport, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns 2 bits in the main cause register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((ERR_IRQ << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands are completed by their poller,
			 * not by the interrupt error path
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hardport = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch success interrupts by current port mode */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((DMA_IRQ << hardport) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hardport) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1740
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001741static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1742{
Mark Lord02a121d2007-12-01 13:07:22 -05001743 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001744 struct ata_port *ap;
1745 struct ata_queued_cmd *qc;
1746 struct ata_eh_info *ehi;
1747 unsigned int i, err_mask, printed = 0;
1748 u32 err_cause;
1749
Mark Lord02a121d2007-12-01 13:07:22 -05001750 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001751
1752 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1753 err_cause);
1754
1755 DPRINTK("All regs @ PCI error\n");
1756 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1757
Mark Lord02a121d2007-12-01 13:07:22 -05001758 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001759
1760 for (i = 0; i < host->n_ports; i++) {
1761 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001762 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001763 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001764 ata_ehi_clear_desc(ehi);
1765 if (!printed++)
1766 ata_ehi_push_desc(ehi,
1767 "PCI err cause 0x%08x", err_cause);
1768 err_mask = AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001769 ehi->action = ATA_EH_RESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001770 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001771 if (qc)
1772 qc->err_mask |= err_mask;
1773 else
1774 ehi->err_mask |= err_mask;
1775
1776 ata_port_freeze(ap);
1777 }
1778 }
1779}
1780
Brett Russ05b308e2005-10-05 17:08:53 -04001781/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001782 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001783 * @irq: unused
1784 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001785 *
1786 * Read the read only register to determine if any host
1787 * controllers have pending interrupts. If so, call lower level
1788 * routine to handle. Also check for PCI errors which are only
1789 * reported here.
1790 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001791 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001792 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001793 * interrupts.
1794 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 main_cause, main_mask;

	/* all IRQ servicing runs under the host lock */
	spin_lock(&host->lock);
	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller's slice of the cause bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
1833
Jeff Garzikc9d39132005-11-13 17:47:51 -05001834static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1835{
1836 unsigned int ofs;
1837
1838 switch (sc_reg_in) {
1839 case SCR_STATUS:
1840 case SCR_ERROR:
1841 case SCR_CONTROL:
1842 ofs = sc_reg_in * sizeof(u32);
1843 break;
1844 default:
1845 ofs = 0xffffffffU;
1846 break;
1847 }
1848 return ofs;
1849}
1850
Tejun Heoda3dbb12007-07-16 14:29:40 +09001851static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001852{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001853 struct mv_host_priv *hpriv = ap->host->private_data;
1854 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001855 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001856 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1857
Tejun Heoda3dbb12007-07-16 14:29:40 +09001858 if (ofs != 0xffffffffU) {
1859 *val = readl(addr + ofs);
1860 return 0;
1861 } else
1862 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001863}
1864
Tejun Heoda3dbb12007-07-16 14:29:40 +09001865static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001866{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001867 struct mv_host_priv *hpriv = ap->host->private_data;
1868 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001869 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001870 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1871
Tejun Heoda3dbb12007-07-16 14:29:40 +09001872 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001873 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001874 return 0;
1875 } else
1876 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001877}
1878
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* rev-0 5080 parts lack the expansion-ROM BAR control quirk */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1894
/* Restore the 50xx flash-interface control register to its default */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1899
/*
 * Capture the 50xx PHY pre-emphasis and amplitude settings so they
 * can be re-applied after a channel reset (see mv5_phy_errata).
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1911
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): |= ~(1 << 0) sets every bit EXCEPT bit 0, while
	 * mv5_reset_bus does |= (1 << 0).  Looks suspicious — confirm
	 * against the Marvell datasheet whether &= ~(1 << 0) was meant.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1924
/*
 * Apply 50xx PHY errata workarounds and restore the per-port
 * pre-emphasis/amplitude values saved by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits of PHY_MODE replaced by the saved pre/amps values */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	/* 50XXB0 errata: adjust link-training and PHY control first */
	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply the saved signal settings into the masked bits */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1950
Jeff Garzikc9d39132005-11-13 17:47:51 -05001951
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: stop/reset its EDMA channel, then zero the
 * per-port EDMA registers and restore default config/timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1981
#define ZERO(reg) writel(0, hc_mmio + (reg))
/**
 *      mv5_reset_one_hc - reset one 50xx host-controller (HC) block
 *      @hpriv: host private data (unused here)
 *      @mmio: base address of the HBA registers
 *      @hc: host controller index
 *
 *      Zeroes a set of HC-level registers and performs a read-modify-write
 *      of HC register 0x20.  Offsets/values are magic numbers taken as-is;
 *      NOTE(review): exact register meanings per the 50xx datasheet.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* keep bits in 0x1c1c1c1c, force bits 0x03030303 on */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2000
2001static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2002 unsigned int n_hc)
2003{
2004 unsigned int hc, port;
2005
2006 for (hc = 0; hc < n_hc; hc++) {
2007 for (port = 0; port < MV_PORTS_PER_HC; port++)
2008 mv5_reset_hc_port(hpriv, mmio,
2009 (hc * MV_PORTS_PER_HC) + port);
2010
2011 mv5_reset_one_hc(hpriv, mmio, hc);
2012 }
2013
2014 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002015}
2016
Jeff Garzik101ffae2005-11-12 22:17:49 -05002017#undef ZERO
2018#define ZERO(reg) writel(0, mmio + (reg))
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002019static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002020{
Mark Lord02a121d2007-12-01 13:07:22 -05002021 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002022 u32 tmp;
2023
2024 tmp = readl(mmio + MV_PCI_MODE);
2025 tmp &= 0xff00ffff;
2026 writel(tmp, mmio + MV_PCI_MODE);
2027
2028 ZERO(MV_PCI_DISC_TIMER);
2029 ZERO(MV_PCI_MSI_TRIGGER);
2030 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2031 ZERO(HC_MAIN_IRQ_MASK_OFS);
2032 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002033 ZERO(hpriv->irq_cause_ofs);
2034 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002035 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2036 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2037 ZERO(MV_PCI_ERR_ATTRIBUTE);
2038 ZERO(MV_PCI_ERR_COMMAND);
2039}
2040#undef ZERO
2041
/*
 * mv6_reset_flash: 60xx flavor of the flash reset.  Performs the common
 * 5xxx flash reset, then adjusts the GPIO port control register:
 * keep only bits 1:0, set bits 5 and 6.
 * NOTE(review): bit meanings are undocumented here (magic values).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2053
2054/**
2055 * mv6_reset_hc - Perform the 6xxx global soft reset
2056 * @mmio: base address of the HBA
2057 *
2058 * This routine only applies to 6xxx parts.
2059 *
2060 * LOCKING:
2061 * Inherited from caller.
2062 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002063static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2064 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002065{
2066 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2067 int i, rc = 0;
2068 u32 t;
2069
2070 /* Following procedure defined in PCI "main command and status
2071 * register" table.
2072 */
2073 t = readl(reg);
2074 writel(t | STOP_PCI_MASTER, reg);
2075
2076 for (i = 0; i < 1000; i++) {
2077 udelay(1);
2078 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002079 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002080 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002081 }
2082 if (!(PCI_MASTER_EMPTY & t)) {
2083 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2084 rc = 1;
2085 goto done;
2086 }
2087
2088 /* set reset */
2089 i = 5;
2090 do {
2091 writel(t | GLOB_SFT_RST, reg);
2092 t = readl(reg);
2093 udelay(1);
2094 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2095
2096 if (!(GLOB_SFT_RST & t)) {
2097 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2098 rc = 1;
2099 goto done;
2100 }
2101
2102 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2103 i = 5;
2104 do {
2105 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2106 t = readl(reg);
2107 udelay(1);
2108 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2109
2110 if (GLOB_SFT_RST & t) {
2111 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2112 rc = 1;
2113 }
Mark Lord094e50b2008-04-16 15:01:19 -04002114 /*
2115 * Temporary: wait 3 seconds before port-probing can happen,
2116 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2117 * This can go away once hotplug is fully/correctly implemented.
2118 */
2119 if (rc == 0)
2120 msleep(3000);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002121done:
2122 return rc;
2123}
2124
Jeff Garzik47c2b672005-11-12 21:13:17 -05002125static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002126 void __iomem *mmio)
2127{
2128 void __iomem *port_mmio;
2129 u32 tmp;
2130
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002131 tmp = readl(mmio + MV_RESET_CFG);
2132 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002133 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002134 hpriv->signal[idx].pre = 0x1 << 5;
2135 return;
2136 }
2137
2138 port_mmio = mv_port_base(mmio, idx);
2139 tmp = readl(port_mmio + PHY_MODE2);
2140
2141 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2142 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2143}
2144
Jeff Garzik47c2b672005-11-12 21:13:17 -05002145static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002146{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002147 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002148}
2149
Jeff Garzikc9d39132005-11-13 17:47:51 -05002150static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002151 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002152{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002153 void __iomem *port_mmio = mv_port_base(mmio, port);
2154
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002155 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002156 int fix_phy_mode2 =
2157 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002158 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002159 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2160 u32 m2, tmp;
2161
2162 if (fix_phy_mode2) {
2163 m2 = readl(port_mmio + PHY_MODE2);
2164 m2 &= ~(1 << 16);
2165 m2 |= (1 << 31);
2166 writel(m2, port_mmio + PHY_MODE2);
2167
2168 udelay(200);
2169
2170 m2 = readl(port_mmio + PHY_MODE2);
2171 m2 &= ~((1 << 16) | (1 << 31));
2172 writel(m2, port_mmio + PHY_MODE2);
2173
2174 udelay(200);
2175 }
2176
2177 /* who knows what this magic does */
2178 tmp = readl(port_mmio + PHY_MODE3);
2179 tmp &= ~0x7F800000;
2180 tmp |= 0x2A800000;
2181 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002182
2183 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002184 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002185
2186 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002187
2188 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002189 tmp = readl(port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002190
Mark Lorde12bef52008-03-31 19:33:56 -04002191 /* workaround for errata FEr SATA#10 (part 1) */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002192 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2193
2194 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002195
2196 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002197 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002198 }
2199
2200 /* Revert values of pre-emphasis and signal amps to the saved ones */
2201 m2 = readl(port_mmio + PHY_MODE2);
2202
2203 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002204 m2 |= hpriv->signal[port].amps;
2205 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002206 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002207
Jeff Garzike4e7b892006-01-31 12:18:41 -05002208 /* according to mvSata 3.6.1, some IIE values are fixed */
2209 if (IS_GEN_IIE(hpriv)) {
2210 m2 &= ~0xC30FF01F;
2211 m2 |= 0x0000900F;
2212 }
2213
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002214 writel(m2, port_mmio + PHY_MODE2);
2215}
2216
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002217/* TODO: use the generic LED interface to configure the SATA Presence */
2218/* & Acitivy LEDs on the board */
2219static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2220 void __iomem *mmio)
2221{
2222 return;
2223}
2224
2225static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2226 void __iomem *mmio)
2227{
2228 void __iomem *port_mmio;
2229 u32 tmp;
2230
2231 port_mmio = mv_port_base(mmio, idx);
2232 tmp = readl(port_mmio + PHY_MODE2);
2233
2234 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2235 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2236}
2237
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/**
 *      mv_soc_reset_hc_port - bring one SoC port to a clean post-reset state
 *      @hpriv: host private data (passed through to mv_reset_channel)
 *      @mmio: base address of the HBA registers
 *      @port: port number to reset
 *
 *      SoC counterpart of mv5_reset_hc_port(): resets the channel, then
 *      zeroes the per-port EDMA registers.  Differs only in the EDMA
 *      config value written (0x101f vs 0x11f on 50xx).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	/* 0x101f: SoC EDMA config value — NOTE(review): magic, per vendor code */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
2268
#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc: zero a set of HC-level registers on the (single)
 * SoC host controller.  Unlike mv5_reset_one_hc(), offset 0x018 is not
 * cleared and no 0x20 fixup is done — NOTE(review): presumably a
 * deliberate SoC difference; confirm against the SoC datasheet.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2282
2283static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2284 void __iomem *mmio, unsigned int n_hc)
2285{
2286 unsigned int port;
2287
2288 for (port = 0; port < hpriv->n_ports; port++)
2289 mv_soc_reset_hc_port(hpriv, mmio, port);
2290
2291 mv_soc_reset_one_hc(hpriv, mmio);
2292
2293 return 0;
2294}
2295
2296static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2297 void __iomem *mmio)
2298{
2299 return;
2300}
2301
2302static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2303{
2304 return;
2305}
2306
Mark Lordb67a1062008-03-31 19:35:13 -04002307static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2308{
2309 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2310
2311 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2312 if (want_gen2i)
2313 ifctl |= (1 << 7); /* enable gen2i speed */
2314 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2315}
2316
Mark Lordb5624682008-03-31 19:34:40 -04002317/*
2318 * Caller must ensure that EDMA is not active,
2319 * by first doing mv_stop_edma() where needed.
2320 */
Mark Lorde12bef52008-03-31 19:33:56 -04002321static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05002322 unsigned int port_no)
Brett Russ20f733e72005-09-01 18:26:17 -04002323{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002324 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e72005-09-01 18:26:17 -04002325
Mark Lord0d8be5c2008-04-16 14:56:12 -04002326 mv_stop_edma_engine(port_mmio);
Brett Russ31961942005-09-30 01:36:00 -04002327 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002328
Mark Lordb67a1062008-03-31 19:35:13 -04002329 if (!IS_GEN_I(hpriv)) {
2330 /* Enable 3.0gb/s link speed */
2331 mv_setup_ifctl(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002332 }
Mark Lordb67a1062008-03-31 19:35:13 -04002333 /*
2334 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2335 * link, and physical layers. It resets all SATA interface registers
2336 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
Brett Russ20f733e72005-09-01 18:26:17 -04002337 */
Mark Lordb67a1062008-03-31 19:35:13 -04002338 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2339 udelay(25); /* allow reset propagation */
Brett Russ31961942005-09-30 01:36:00 -04002340 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002341
Jeff Garzikc9d39132005-11-13 17:47:51 -05002342 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2343
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002344 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002345 mdelay(1);
2346}
2347
Mark Lorde49856d2008-04-16 14:59:07 -04002348static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002349{
Mark Lorde49856d2008-04-16 14:59:07 -04002350 if (sata_pmp_supported(ap)) {
2351 void __iomem *port_mmio = mv_ap_base(ap);
2352 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2353 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002354
Mark Lorde49856d2008-04-16 14:59:07 -04002355 if (old != pmp) {
2356 reg = (reg & ~0xf) | pmp;
2357 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2358 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09002359 }
Brett Russ20f733e72005-09-01 18:26:17 -04002360}
2361
Mark Lorde49856d2008-04-16 14:59:07 -04002362static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2363 unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002364{
Mark Lorde49856d2008-04-16 14:59:07 -04002365 mv_pmp_select(link->ap, sata_srst_pmp(link));
2366 return sata_std_hardreset(link, class, deadline);
2367}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002368
Mark Lorde49856d2008-04-16 14:59:07 -04002369static int mv_softreset(struct ata_link *link, unsigned int *class,
2370 unsigned long deadline)
2371{
2372 mv_pmp_select(link->ap, sata_srst_pmp(link));
2373 return ata_sff_softreset(link, class, deadline);
Jeff Garzik22374672005-11-17 10:59:48 -05002374}
2375
Tejun Heocc0680a2007-08-06 18:36:23 +09002376static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002377 unsigned long deadline)
2378{
Tejun Heocc0680a2007-08-06 18:36:23 +09002379 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002380 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04002381 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002382 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002383 int rc, attempts = 0, extra = 0;
2384 u32 sstatus;
2385 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002386
Mark Lorde12bef52008-03-31 19:33:56 -04002387 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04002388 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002389
Mark Lord0d8be5c2008-04-16 14:56:12 -04002390 /* Workaround for errata FEr SATA#10 (part 2) */
2391 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04002392 const unsigned long *timing =
2393 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002394
Mark Lord17c5aab2008-04-16 14:56:51 -04002395 rc = sata_link_hardreset(link, timing, deadline + extra,
2396 &online, NULL);
2397 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04002398 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002399 sata_scr_read(link, SCR_STATUS, &sstatus);
2400 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2401 /* Force 1.5gb/s link speed and try again */
2402 mv_setup_ifctl(mv_ap_base(ap), 0);
2403 if (time_after(jiffies + HZ, deadline))
2404 extra = HZ; /* only extend it once, max */
2405 }
2406 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002407
Mark Lord17c5aab2008-04-16 14:56:51 -04002408 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002409}
2410
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002411static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e72005-09-01 18:26:17 -04002412{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002413 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002414 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002415 unsigned int shift;
Mark Lord352fab72008-04-19 14:43:42 -04002416 u32 main_mask;
Brett Russ31961942005-09-30 01:36:00 -04002417
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002418 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002419
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002420 shift = ap->port_no * 2;
2421 if (hc > 0)
2422 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002423
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002424 /* disable assertion of portN err, done events */
Mark Lord352fab72008-04-19 14:43:42 -04002425 main_mask = readl(hpriv->main_mask_reg_addr);
2426 main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
2427 writelfl(main_mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002428}
2429
/*
 * mv_eh_thaw: undo mv_eh_freeze().  Clears any latched EDMA errors and
 * pending HC irq events for this port, then re-enables the port's
 * "done" and "error" bits in the main interrupt mask.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;	/* ports 4..7 are on HC 1 */
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int shift, hc_port_no = ap->port_no;
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two mask bits; HC 1's block sits one bit higher */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port number relative to its own HC */
	}

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hc_port_no);
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2461
Brett Russ05b308e2005-10-05 17:08:53 -04002462/**
2463 * mv_port_init - Perform some early initialization on a single port.
2464 * @port: libata data structure storing shadow register addresses
2465 * @port_mmio: base address of the port
2466 *
2467 * Initialize shadow register mmio addresses, clear outstanding
2468 * interrupts on the port, and unmask interrupts for the future
2469 * start of the port.
2470 *
2471 * LOCKING:
2472 * Inherited from caller.
2473 */
Brett Russ31961942005-09-30 01:36:00 -04002474static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2475{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002476 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002477 unsigned serr_ofs;
2478
Jeff Garzik8b260242005-11-12 12:32:50 -05002479 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002480 */
2481 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002482 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002483 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2484 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2485 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2486 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2487 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2488 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002489 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002490 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2491 /* special case: control/altstatus doesn't have ATA_REG_ address */
2492 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2493
2494 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002495 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e72005-09-01 18:26:17 -04002496
Brett Russ31961942005-09-30 01:36:00 -04002497 /* Clear any currently outstanding port interrupt conditions */
2498 serr_ofs = mv_scr_offset(SCR_ERROR);
2499 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2500 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2501
Mark Lord646a4da2008-01-26 18:30:37 -05002502 /* unmask all non-transient EDMA error interrupts */
2503 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002504
Jeff Garzik8b260242005-11-12 12:32:50 -05002505 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002506 readl(port_mmio + EDMA_CFG_OFS),
2507 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2508 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e72005-09-01 18:26:17 -04002509}
2510
Tejun Heo4447d352007-04-17 23:44:08 +09002511static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002512{
Tejun Heo4447d352007-04-17 23:44:08 +09002513 struct pci_dev *pdev = to_pci_dev(host->dev);
2514 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002515 u32 hp_flags = hpriv->hp_flags;
2516
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002517 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002518 case chip_5080:
2519 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002520 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002521
Auke Kok44c10132007-06-08 15:46:36 -07002522 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002523 case 0x1:
2524 hp_flags |= MV_HP_ERRATA_50XXB0;
2525 break;
2526 case 0x3:
2527 hp_flags |= MV_HP_ERRATA_50XXB2;
2528 break;
2529 default:
2530 dev_printk(KERN_WARNING, &pdev->dev,
2531 "Applying 50XXB2 workarounds to unknown rev\n");
2532 hp_flags |= MV_HP_ERRATA_50XXB2;
2533 break;
2534 }
2535 break;
2536
2537 case chip_504x:
2538 case chip_508x:
2539 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002540 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002541
Auke Kok44c10132007-06-08 15:46:36 -07002542 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002543 case 0x0:
2544 hp_flags |= MV_HP_ERRATA_50XXB0;
2545 break;
2546 case 0x3:
2547 hp_flags |= MV_HP_ERRATA_50XXB2;
2548 break;
2549 default:
2550 dev_printk(KERN_WARNING, &pdev->dev,
2551 "Applying B2 workarounds to unknown rev\n");
2552 hp_flags |= MV_HP_ERRATA_50XXB2;
2553 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002554 }
2555 break;
2556
2557 case chip_604x:
2558 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002559 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002560 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002561
Auke Kok44c10132007-06-08 15:46:36 -07002562 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002563 case 0x7:
2564 hp_flags |= MV_HP_ERRATA_60X1B2;
2565 break;
2566 case 0x9:
2567 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002568 break;
2569 default:
2570 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002571 "Applying B2 workarounds to unknown rev\n");
2572 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002573 break;
2574 }
2575 break;
2576
Jeff Garzike4e7b892006-01-31 12:18:41 -05002577 case chip_7042:
Mark Lord02a121d2007-12-01 13:07:22 -05002578 hp_flags |= MV_HP_PCIE;
Mark Lord306b30f2007-12-04 14:07:52 -05002579 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2580 (pdev->device == 0x2300 || pdev->device == 0x2310))
2581 {
Mark Lord4e520032007-12-11 12:58:05 -05002582 /*
2583 * Highpoint RocketRAID PCIe 23xx series cards:
2584 *
2585 * Unconfigured drives are treated as "Legacy"
2586 * by the BIOS, and it overwrites sector 8 with
2587 * a "Lgcy" metadata block prior to Linux boot.
2588 *
2589 * Configured drives (RAID or JBOD) leave sector 8
2590 * alone, but instead overwrite a high numbered
2591 * sector for the RAID metadata. This sector can
2592 * be determined exactly, by truncating the physical
2593 * drive capacity to a nice even GB value.
2594 *
2595 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2596 *
2597 * Warn the user, lest they think we're just buggy.
2598 */
2599 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2600 " BIOS CORRUPTS DATA on all attached drives,"
2601 " regardless of if/how they are configured."
2602 " BEWARE!\n");
2603 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2604 " use sectors 8-9 on \"Legacy\" drives,"
2605 " and avoid the final two gigabytes on"
2606 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05002607 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05002608 case chip_6042:
2609 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002610 hp_flags |= MV_HP_GEN_IIE;
2611
Auke Kok44c10132007-06-08 15:46:36 -07002612 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002613 case 0x0:
2614 hp_flags |= MV_HP_ERRATA_XX42A0;
2615 break;
2616 case 0x1:
2617 hp_flags |= MV_HP_ERRATA_60X1C0;
2618 break;
2619 default:
2620 dev_printk(KERN_WARNING, &pdev->dev,
2621 "Applying 60X1C0 workarounds to unknown rev\n");
2622 hp_flags |= MV_HP_ERRATA_60X1C0;
2623 break;
2624 }
2625 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002626 case chip_soc:
2627 hpriv->ops = &mv_soc_ops;
2628 hp_flags |= MV_HP_ERRATA_60X1C0;
2629 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002630
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002631 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002632 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002633 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002634 return 1;
2635 }
2636
2637 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05002638 if (hp_flags & MV_HP_PCIE) {
2639 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2640 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2641 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2642 } else {
2643 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2644 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2645 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2646 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002647
2648 return 0;
2649}
2650
Brett Russ05b308e2005-10-05 17:08:53 -04002651/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002652 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002653 * @host: ATA host to initialize
2654 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002655 *
2656 * If possible, do an early global reset of the host. Then do
2657 * our port init and clear/unmask all/relevant host interrupts.
2658 *
2659 * LOCKING:
2660 * Inherited from caller.
2661 */
Tejun Heo4447d352007-04-17 23:44:08 +09002662static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e72005-09-01 18:26:17 -04002663{
2664 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002665 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002666 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002667
Tejun Heo4447d352007-04-17 23:44:08 +09002668 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002669 if (rc)
Mark Lord352fab72008-04-19 14:43:42 -04002670 goto done;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002671
2672 if (HAS_PCI(host)) {
Mark Lord352fab72008-04-19 14:43:42 -04002673 hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
2674 hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002675 } else {
Mark Lord352fab72008-04-19 14:43:42 -04002676 hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
2677 hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002678 }
Mark Lord352fab72008-04-19 14:43:42 -04002679
2680 /* global interrupt mask: 0 == mask everything */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002681 writel(0, hpriv->main_mask_reg_addr);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002682
Tejun Heo4447d352007-04-17 23:44:08 +09002683 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002684
Tejun Heo4447d352007-04-17 23:44:08 +09002685 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002686 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e72005-09-01 18:26:17 -04002687
Jeff Garzikc9d39132005-11-13 17:47:51 -05002688 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002689 if (rc)
Brett Russ20f733e72005-09-01 18:26:17 -04002690 goto done;
Brett Russ20f733e72005-09-01 18:26:17 -04002691
Jeff Garzik522479f2005-11-12 22:14:02 -05002692 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002693 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002694 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e72005-09-01 18:26:17 -04002695
Tejun Heo4447d352007-04-17 23:44:08 +09002696 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002697 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002698 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002699
2700 mv_port_init(&ap->ioaddr, port_mmio);
2701
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002702#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002703 if (HAS_PCI(host)) {
2704 unsigned int offset = port_mmio - mmio;
2705 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2706 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2707 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002708#endif
Brett Russ20f733e72005-09-01 18:26:17 -04002709 }
2710
2711 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002712 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2713
2714 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2715 "(before clear)=0x%08x\n", hc,
2716 readl(hc_mmio + HC_CFG_OFS),
2717 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2718
2719 /* Clear any currently outstanding hc interrupt conditions */
2720 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002721 }
2722
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002723 if (HAS_PCI(host)) {
2724 /* Clear any currently outstanding host interrupt conditions */
2725 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002726
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002727 /* and unmask interrupt generation for host regs */
2728 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2729 if (IS_GEN_I(hpriv))
2730 writelfl(~HC_MAIN_MASKED_IRQS_5,
2731 hpriv->main_mask_reg_addr);
2732 else
2733 writelfl(~HC_MAIN_MASKED_IRQS,
2734 hpriv->main_mask_reg_addr);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002735
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002736 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2737 "PCI int cause/mask=0x%08x/0x%08x\n",
2738 readl(hpriv->main_cause_reg_addr),
2739 readl(hpriv->main_mask_reg_addr),
2740 readl(mmio + hpriv->irq_cause_ofs),
2741 readl(mmio + hpriv->irq_mask_ofs));
2742 } else {
2743 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2744 hpriv->main_mask_reg_addr);
2745 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2746 readl(hpriv->main_cause_reg_addr),
2747 readl(hpriv->main_mask_reg_addr));
2748 }
Brett Russ31961942005-09-30 01:36:00 -04002749done:
Brett Russ20f733e72005-09-01 18:26:17 -04002750 return rc;
2751}
2752
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002753static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2754{
2755 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2756 MV_CRQB_Q_SZ, 0);
2757 if (!hpriv->crqb_pool)
2758 return -ENOMEM;
2759
2760 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2761 MV_CRPB_Q_SZ, 0);
2762 if (!hpriv->crpb_pool)
2763 return -ENOMEM;
2764
2765 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2766 MV_SG_TBL_SZ, 0);
2767 if (!hpriv->sg_tbl_pool)
2768 return -ENOMEM;
2769
2770 return 0;
2771}
2772
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002773static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2774 struct mbus_dram_target_info *dram)
2775{
2776 int i;
2777
2778 for (i = 0; i < 4; i++) {
2779 writel(0, hpriv->base + WINDOW_CTRL(i));
2780 writel(0, hpriv->base + WINDOW_BASE(i));
2781 }
2782
2783 for (i = 0; i < dram->num_cs; i++) {
2784 struct mbus_dram_window *cs = dram->cs + i;
2785
2786 writel(((cs->size - 1) & 0xffff0000) |
2787 (cs->mbus_attr << 8) |
2788 (dram->mbus_dram_target_id << 4) | 1,
2789 hpriv->base + WINDOW_CTRL(i));
2790 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2791 }
2792}
2793
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002794/**
2795 * mv_platform_probe - handle a positive probe of an soc Marvell
2796 * host
2797 * @pdev: platform device found
2798 *
2799 * LOCKING:
2800 * Inherited from caller.
2801 */
2802static int mv_platform_probe(struct platform_device *pdev)
2803{
2804 static int printed_version;
2805 const struct mv_sata_platform_data *mv_platform_data;
2806 const struct ata_port_info *ppi[] =
2807 { &mv_port_info[chip_soc], NULL };
2808 struct ata_host *host;
2809 struct mv_host_priv *hpriv;
2810 struct resource *res;
2811 int n_ports, rc;
2812
2813 if (!printed_version++)
2814 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2815
2816 /*
2817 * Simple resource validation ..
2818 */
2819 if (unlikely(pdev->num_resources != 2)) {
2820 dev_err(&pdev->dev, "invalid number of resources\n");
2821 return -EINVAL;
2822 }
2823
2824 /*
2825 * Get the register base first
2826 */
2827 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2828 if (res == NULL)
2829 return -EINVAL;
2830
2831 /* allocate host */
2832 mv_platform_data = pdev->dev.platform_data;
2833 n_ports = mv_platform_data->n_ports;
2834
2835 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2836 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2837
2838 if (!host || !hpriv)
2839 return -ENOMEM;
2840 host->private_data = hpriv;
2841 hpriv->n_ports = n_ports;
2842
2843 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002844 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2845 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002846 hpriv->base -= MV_SATAHC0_REG_BASE;
2847
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002848 /*
2849 * (Re-)program MBUS remapping windows if we are asked to.
2850 */
2851 if (mv_platform_data->dram != NULL)
2852 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2853
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002854 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2855 if (rc)
2856 return rc;
2857
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002858 /* initialize adapter */
2859 rc = mv_init_host(host, chip_soc);
2860 if (rc)
2861 return rc;
2862
2863 dev_printk(KERN_INFO, &pdev->dev,
2864 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2865 host->n_ports);
2866
2867 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2868 IRQF_SHARED, &mv6_sht);
2869}
2870
2871/*
2872 *
2873 * mv_platform_remove - unplug a platform interface
2874 * @pdev: platform device
2875 *
2876 * A platform bus SATA device has been unplugged. Perform the needed
2877 * cleanup. Also called on module unload for any active devices.
2878 */
2879static int __devexit mv_platform_remove(struct platform_device *pdev)
2880{
2881 struct device *dev = &pdev->dev;
2882 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002883
2884 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002885 return 0;
2886}
2887
/* Platform-bus glue for the SoC-integrated (chip_soc) SATA controllers. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				   },
};
2896
2897
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002898#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002899static int mv_pci_init_one(struct pci_dev *pdev,
2900 const struct pci_device_id *ent);
2901
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002902
2903static struct pci_driver mv_pci_driver = {
2904 .name = DRV_NAME,
2905 .id_table = mv_pci_tbl,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002906 .probe = mv_pci_init_one,
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002907 .remove = ata_pci_remove_one,
2908};
2909
2910/*
2911 * module options
2912 */
2913static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2914
2915
2916/* move to PCI layer or libata core? */
2917static int pci_go_64(struct pci_dev *pdev)
2918{
2919 int rc;
2920
2921 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2922 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2923 if (rc) {
2924 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2925 if (rc) {
2926 dev_printk(KERN_ERR, &pdev->dev,
2927 "64-bit DMA enable failed\n");
2928 return rc;
2929 }
2930 }
2931 } else {
2932 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2933 if (rc) {
2934 dev_printk(KERN_ERR, &pdev->dev,
2935 "32-bit DMA enable failed\n");
2936 return rc;
2937 }
2938 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2939 if (rc) {
2940 dev_printk(KERN_ERR, &pdev->dev,
2941 "32-bit consistent DMA enable failed\n");
2942 return rc;
2943 }
2944 }
2945
2946 return rc;
2947}
2948
Brett Russ05b308e2005-10-05 17:08:53 -04002949/**
2950 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002951 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002952 *
2953 * FIXME: complete this.
2954 *
2955 * LOCKING:
2956 * Inherited from caller.
2957 */
Tejun Heo4447d352007-04-17 23:44:08 +09002958static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002959{
Tejun Heo4447d352007-04-17 23:44:08 +09002960 struct pci_dev *pdev = to_pci_dev(host->dev);
2961 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002962 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002963 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002964
2965 /* Use this to determine the HW stepping of the chip so we know
2966 * what errata to workaround
2967 */
Brett Russ31961942005-09-30 01:36:00 -04002968 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2969 if (scc == 0)
2970 scc_s = "SCSI";
2971 else if (scc == 0x01)
2972 scc_s = "RAID";
2973 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002974 scc_s = "?";
2975
2976 if (IS_GEN_I(hpriv))
2977 gen = "I";
2978 else if (IS_GEN_II(hpriv))
2979 gen = "II";
2980 else if (IS_GEN_IIE(hpriv))
2981 gen = "IIE";
2982 else
2983 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002984
Jeff Garzika9524a72005-10-30 14:39:11 -05002985 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002986 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2987 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002988 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2989}
2990
Brett Russ05b308e2005-10-05 17:08:53 -04002991/**
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002992 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
Brett Russ05b308e2005-10-05 17:08:53 -04002993 * @pdev: PCI device found
2994 * @ent: PCI device ID entry for the matched host
2995 *
2996 * LOCKING:
2997 * Inherited from caller.
2998 */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002999static int mv_pci_init_one(struct pci_dev *pdev,
3000 const struct pci_device_id *ent)
Brett Russ20f733e72005-09-01 18:26:17 -04003001{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04003002 static int printed_version;
Brett Russ20f733e72005-09-01 18:26:17 -04003003 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09003004 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3005 struct ata_host *host;
3006 struct mv_host_priv *hpriv;
3007 int n_ports, rc;
Brett Russ20f733e72005-09-01 18:26:17 -04003008
Jeff Garzika9524a72005-10-30 14:39:11 -05003009 if (!printed_version++)
3010 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e72005-09-01 18:26:17 -04003011
Tejun Heo4447d352007-04-17 23:44:08 +09003012 /* allocate host */
3013 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3014
3015 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3016 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3017 if (!host || !hpriv)
3018 return -ENOMEM;
3019 host->private_data = hpriv;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003020 hpriv->n_ports = n_ports;
Tejun Heo4447d352007-04-17 23:44:08 +09003021
3022 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09003023 rc = pcim_enable_device(pdev);
3024 if (rc)
Brett Russ20f733e72005-09-01 18:26:17 -04003025 return rc;
Brett Russ20f733e72005-09-01 18:26:17 -04003026
Tejun Heo0d5ff562007-02-01 15:06:36 +09003027 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3028 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003029 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09003030 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09003031 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09003032 host->iomap = pcim_iomap_table(pdev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003033 hpriv->base = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e72005-09-01 18:26:17 -04003034
Jeff Garzikd88184f2007-02-26 01:26:06 -05003035 rc = pci_go_64(pdev);
3036 if (rc)
3037 return rc;
3038
Mark Lordda2fa9b2008-01-26 18:32:45 -05003039 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3040 if (rc)
3041 return rc;
3042
Brett Russ20f733e72005-09-01 18:26:17 -04003043 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09003044 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09003045 if (rc)
3046 return rc;
Brett Russ20f733e72005-09-01 18:26:17 -04003047
Brett Russ31961942005-09-30 01:36:00 -04003048 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09003049 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04003050 pci_intx(pdev, 1);
Brett Russ20f733e72005-09-01 18:26:17 -04003051
Brett Russ31961942005-09-30 01:36:00 -04003052 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09003053 mv_print_info(host);
Brett Russ20f733e72005-09-01 18:26:17 -04003054
Tejun Heo4447d352007-04-17 23:44:08 +09003055 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04003056 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09003057 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04003058 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e72005-09-01 18:26:17 -04003059}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003060#endif
Brett Russ20f733e72005-09-01 18:26:17 -04003061
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003062static int mv_platform_probe(struct platform_device *pdev);
3063static int __devexit mv_platform_remove(struct platform_device *pdev);
3064
Brett Russ20f733e72005-09-01 18:26:17 -04003065static int __init mv_init(void)
3066{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003067 int rc = -ENODEV;
3068#ifdef CONFIG_PCI
3069 rc = pci_register_driver(&mv_pci_driver);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003070 if (rc < 0)
3071 return rc;
3072#endif
3073 rc = platform_driver_register(&mv_platform_driver);
3074
3075#ifdef CONFIG_PCI
3076 if (rc < 0)
3077 pci_unregister_driver(&mv_pci_driver);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003078#endif
3079 return rc;
Brett Russ20f733e72005-09-01 18:26:17 -04003080}
3081
/* Module exit: unregister both drivers in the reverse order of mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3089
/* Module metadata: identity, license, and the PCI/platform bindings
 * used by udev/modprobe for automatic loading. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
/* msi is only meaningful for the PCI flavour of the hardware */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);