blob: ea7af1f168445b98268ed28568cf2690f5eff9dd [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/
59
60
Brett Russ20f733e72005-09-01 18:26:17 -040061#include <linux/kernel.h>
62#include <linux/module.h>
63#include <linux/pci.h>
64#include <linux/init.h>
65#include <linux/blkdev.h>
66#include <linux/delay.h>
67#include <linux/interrupt.h>
Brett Russ20f733e72005-09-01 18:26:17 -040068#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050069#include <linux/device.h>
Brett Russ20f733e72005-09-01 18:26:17 -040070#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050071#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040072#include <scsi/scsi_device.h>
Brett Russ20f733e72005-09-01 18:26:17 -040073#include <linux/libata.h>
Brett Russ20f733e72005-09-01 18:26:17 -040074
/* Driver identification, reported through the libata/SCSI layers. */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"
Brett Russ20f733e72005-09-01 18:26:17 -040077
78enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040089 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
Brett Russ20f733e72005-09-01 18:26:17 -040095 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -050096 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -050097 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e72005-09-01 18:26:17 -040099
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
Brett Russ31961942005-09-30 01:36:00 -0400105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400110 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
111 */
112 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
113 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500114 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400115 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400116
Brett Russ20f733e72005-09-01 18:26:17 -0400117 MV_PORTS_PER_HC = 4,
118 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
119 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400120 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e72005-09-01 18:26:17 -0400121 MV_PORT_MASK = 3,
122
123 /* Host Flags */
124 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
125 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400126 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400127 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
128 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500129 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e72005-09-01 18:26:17 -0400130
Brett Russ31961942005-09-30 01:36:00 -0400131 CRQB_FLAG_READ = (1 << 0),
132 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400133 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
134 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400135 CRQB_CMD_ADDR_SHIFT = 8,
136 CRQB_CMD_CS = (0x2 << 11),
137 CRQB_CMD_LAST = (1 << 15),
138
139 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400140 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
141 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400142
143 EPRD_FLAG_END_OF_TBL = (1 << 31),
144
Brett Russ20f733e72005-09-01 18:26:17 -0400145 /* PCI interface registers */
146
Brett Russ31961942005-09-30 01:36:00 -0400147 PCI_COMMAND_OFS = 0xc00,
148
Brett Russ20f733e72005-09-01 18:26:17 -0400149 PCI_MAIN_CMD_STS_OFS = 0xd30,
150 STOP_PCI_MASTER = (1 << 2),
151 PCI_MASTER_EMPTY = (1 << 3),
152 GLOB_SFT_RST = (1 << 4),
153
Jeff Garzik522479f2005-11-12 22:14:02 -0500154 MV_PCI_MODE = 0xd00,
155 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
156 MV_PCI_DISC_TIMER = 0xd04,
157 MV_PCI_MSI_TRIGGER = 0xc38,
158 MV_PCI_SERR_MASK = 0xc28,
159 MV_PCI_XBAR_TMOUT = 0x1d04,
160 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
161 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
162 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
163 MV_PCI_ERR_COMMAND = 0x1d50,
164
Mark Lord02a121d2007-12-01 13:07:22 -0500165 PCI_IRQ_CAUSE_OFS = 0x1d58,
166 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e72005-09-01 18:26:17 -0400167 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
168
Mark Lord02a121d2007-12-01 13:07:22 -0500169 PCIE_IRQ_CAUSE_OFS = 0x1900,
170 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500171 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500172
Brett Russ20f733e72005-09-01 18:26:17 -0400173 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
174 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
175 PORT0_ERR = (1 << 0), /* shift by port # */
176 PORT0_DONE = (1 << 1), /* shift by port # */
177 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
178 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
179 PCI_ERR = (1 << 18),
180 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
181 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500182 PORTS_0_3_COAL_DONE = (1 << 8),
183 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e72005-09-01 18:26:17 -0400184 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
185 GPIO_INT = (1 << 22),
186 SELF_INT = (1 << 23),
187 TWSI_INT = (1 << 24),
188 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500189 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500190 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e72005-09-01 18:26:17 -0400191 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
192 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
194 HC_MAIN_RSVD_5),
Brett Russ20f733e72005-09-01 18:26:17 -0400195
196 /* SATAHC registers */
197 HC_CFG_OFS = 0,
198
199 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400200 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e72005-09-01 18:26:17 -0400201 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
202 DEV_IRQ = (1 << 8), /* shift by port # */
203
204 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400205 SHD_BLK_OFS = 0x100,
206 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e72005-09-01 18:26:17 -0400207
208 /* SATA registers */
209 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
210 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500211 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500212 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500213 PHY_MODE4 = 0x314,
214 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500215 MV5_PHY_MODE = 0x74,
216 MV5_LT_MODE = 0x30,
217 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500218 SATA_INTERFACE_CTL = 0x050,
219
220 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e72005-09-01 18:26:17 -0400221
222 /* Port registers */
223 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500224 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
225 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
226 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
227 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
228 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e72005-09-01 18:26:17 -0400229
230 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
231 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400232 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
233 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
234 EDMA_ERR_DEV = (1 << 2), /* device error */
235 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
236 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
237 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400238 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
239 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400240 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400241 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400242 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
243 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
244 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
245 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500246
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400247 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500248 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
249 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
250 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
251 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
252
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400253 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500254
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400255 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500256 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
257 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
258 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
259 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
260 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
261
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500263
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400265 EDMA_ERR_OVERRUN_5 = (1 << 5),
266 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500267
268 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
269 EDMA_ERR_LNK_CTRL_RX_1 |
270 EDMA_ERR_LNK_CTRL_RX_3 |
271 EDMA_ERR_LNK_CTRL_TX,
272
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400273 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
274 EDMA_ERR_PRD_PAR |
275 EDMA_ERR_DEV_DCON |
276 EDMA_ERR_DEV_CON |
277 EDMA_ERR_SERR |
278 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400279 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400280 EDMA_ERR_CRPB_PAR |
281 EDMA_ERR_INTRL_PAR |
282 EDMA_ERR_IORDY |
283 EDMA_ERR_LNK_CTRL_RX_2 |
284 EDMA_ERR_LNK_DATA_RX |
285 EDMA_ERR_LNK_DATA_TX |
286 EDMA_ERR_TRANS_PROTO,
287 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_OVERRUN_5 |
292 EDMA_ERR_UNDERRUN_5 |
293 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400295 EDMA_ERR_CRPB_PAR |
296 EDMA_ERR_INTRL_PAR |
297 EDMA_ERR_IORDY,
Brett Russ20f733e72005-09-01 18:26:17 -0400298
Brett Russ31961942005-09-30 01:36:00 -0400299 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
300 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400301
302 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
303 EDMA_REQ_Q_PTR_SHIFT = 5,
304
305 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
306 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
307 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400308 EDMA_RSP_Q_PTR_SHIFT = 3,
309
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400310 EDMA_CMD_OFS = 0x28, /* EDMA command register */
311 EDMA_EN = (1 << 0), /* enable EDMA */
312 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
313 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e72005-09-01 18:26:17 -0400314
Jeff Garzikc9d39132005-11-13 17:47:51 -0500315 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500316 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500317
Brett Russ31961942005-09-30 01:36:00 -0400318 /* Host private flags (hp_flags) */
319 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500320 MV_HP_ERRATA_50XXB0 = (1 << 1),
321 MV_HP_ERRATA_50XXB2 = (1 << 2),
322 MV_HP_ERRATA_60X1B2 = (1 << 3),
323 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500324 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400325 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
326 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
327 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500328 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e72005-09-01 18:26:17 -0400329
Brett Russ31961942005-09-30 01:36:00 -0400330 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400331 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500332 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400333 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400334};
335
/* Chip-generation tests on a struct mv_host_priv's hp_flags. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500339
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
354
/* Index into mv_port_info[]; one entry per supported chip family. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
364
Brett Russ31961942005-09-30 01:36:00 -0400365/* Command ReQuest Block: 32B */
366struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400367 __le32 sg_addr;
368 __le32 sg_addr_hi;
369 __le16 ctrl_flags;
370 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400371};
372
Jeff Garzike4e7b892006-01-31 12:18:41 -0500373struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400374 __le32 addr;
375 __le32 addr_hi;
376 __le32 flags;
377 __le32 len;
378 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500379};
380
Brett Russ31961942005-09-30 01:36:00 -0400381/* Command ResPonse Block: 8B */
382struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400383 __le16 id;
384 __le16 flags;
385 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400386};
387
388/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
389struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400390 __le32 addr;
391 __le32 flags_size;
392 __le32 addr_hi;
393 __le32 reserved;
Brett Russ20f733e72005-09-01 18:26:17 -0400394};
395
396struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400397 struct mv_crqb *crqb;
398 dma_addr_t crqb_dma;
399 struct mv_crpb *crpb;
400 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500401 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
402 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400403
404 unsigned int req_idx;
405 unsigned int resp_idx;
406
Brett Russ31961942005-09-30 01:36:00 -0400407 u32 pp_flags;
Brett Russ20f733e72005-09-01 18:26:17 -0400408};
409
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500410struct mv_port_signal {
411 u32 amps;
412 u32 pre;
413};
414
Mark Lord02a121d2007-12-01 13:07:22 -0500415struct mv_host_priv {
416 u32 hp_flags;
417 struct mv_port_signal signal[8];
418 const struct mv_hw_ops *ops;
419 u32 irq_cause_ofs;
420 u32 irq_mask_ofs;
421 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500422 /*
423 * These consistent DMA memory pools give us guaranteed
424 * alignment for hardware-accessed data structures,
425 * and less memory waste in accomplishing the alignment.
426 */
427 struct dma_pool *crqb_pool;
428 struct dma_pool *crpb_pool;
429 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500430};
431
Jeff Garzik47c2b672005-11-12 21:13:17 -0500432struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500433 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
434 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500435 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
436 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
437 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500438 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
439 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500440 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
441 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500442};
443
Brett Russ20f733e72005-09-01 18:26:17 -0400444static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900445static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
446static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
447static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
448static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400449static int mv_port_start(struct ata_port *ap);
450static void mv_port_stop(struct ata_port *ap);
451static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500452static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900453static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400454static void mv_error_handler(struct ata_port *ap);
455static void mv_post_int_cmd(struct ata_queued_cmd *qc);
456static void mv_eh_freeze(struct ata_port *ap);
457static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500458static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e72005-09-01 18:26:17 -0400459static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
460
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500461static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
462 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500463static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
464static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
465 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500466static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
467 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500468static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
469static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500470
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500471static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
474static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500476static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500478static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
479static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500480static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
481 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500482static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
483 void __iomem *port_mmio, int want_ncq);
484static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500485
Mark Lordeb73d552008-01-29 13:24:00 -0500486/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
487 * because we have to allow room for worst case splitting of
488 * PRDs for 64K boundaries in mv_fill_sg().
489 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400490static struct scsi_host_template mv5_sht = {
Brett Russ20f733e72005-09-01 18:26:17 -0400491 .module = THIS_MODULE,
492 .name = DRV_NAME,
493 .ioctl = ata_scsi_ioctl,
494 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400495 .can_queue = ATA_DEF_QUEUE,
496 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400497 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400498 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
499 .emulated = ATA_SHT_EMULATED,
500 .use_clustering = 1,
501 .proc_name = DRV_NAME,
502 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400503 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400504 .slave_destroy = ata_scsi_slave_destroy,
505 .bios_param = ata_std_bios_param,
506};
507
508static struct scsi_host_template mv6_sht = {
509 .module = THIS_MODULE,
510 .name = DRV_NAME,
511 .ioctl = ata_scsi_ioctl,
512 .queuecommand = ata_scsi_queuecmd,
513 .can_queue = ATA_DEF_QUEUE,
Brett Russ20f733e72005-09-01 18:26:17 -0400514 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400515 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e72005-09-01 18:26:17 -0400516 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
517 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500518 .use_clustering = 1,
Brett Russ20f733e72005-09-01 18:26:17 -0400519 .proc_name = DRV_NAME,
520 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400521 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900522 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e72005-09-01 18:26:17 -0400523 .bios_param = ata_std_bios_param,
Brett Russ20f733e72005-09-01 18:26:17 -0400524};
525
Jeff Garzikc9d39132005-11-13 17:47:51 -0500526static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500527 .tf_load = ata_tf_load,
528 .tf_read = ata_tf_read,
529 .check_status = ata_check_status,
530 .exec_command = ata_exec_command,
531 .dev_select = ata_std_dev_select,
532
Jeff Garzikcffacd82007-03-09 09:46:47 -0500533 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500534
535 .qc_prep = mv_qc_prep,
536 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900537 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500538
Jeff Garzikc9d39132005-11-13 17:47:51 -0500539 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900540 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500541
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400542 .error_handler = mv_error_handler,
543 .post_internal_cmd = mv_post_int_cmd,
544 .freeze = mv_eh_freeze,
545 .thaw = mv_eh_thaw,
546
Jeff Garzikc9d39132005-11-13 17:47:51 -0500547 .scr_read = mv5_scr_read,
548 .scr_write = mv5_scr_write,
549
550 .port_start = mv_port_start,
551 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500552};
553
554static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500555 .dev_config = mv6_dev_config,
Brett Russ20f733e72005-09-01 18:26:17 -0400556 .tf_load = ata_tf_load,
557 .tf_read = ata_tf_read,
558 .check_status = ata_check_status,
559 .exec_command = ata_exec_command,
560 .dev_select = ata_std_dev_select,
561
Jeff Garzikcffacd82007-03-09 09:46:47 -0500562 .cable_detect = ata_cable_sata,
Brett Russ20f733e72005-09-01 18:26:17 -0400563
Brett Russ31961942005-09-30 01:36:00 -0400564 .qc_prep = mv_qc_prep,
565 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900566 .data_xfer = ata_data_xfer,
Brett Russ20f733e72005-09-01 18:26:17 -0400567
Brett Russ20f733e72005-09-01 18:26:17 -0400568 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900569 .irq_on = ata_irq_on,
Brett Russ20f733e72005-09-01 18:26:17 -0400570
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400571 .error_handler = mv_error_handler,
572 .post_internal_cmd = mv_post_int_cmd,
573 .freeze = mv_eh_freeze,
574 .thaw = mv_eh_thaw,
575
Brett Russ20f733e72005-09-01 18:26:17 -0400576 .scr_read = mv_scr_read,
577 .scr_write = mv_scr_write,
578
Brett Russ31961942005-09-30 01:36:00 -0400579 .port_start = mv_port_start,
580 .port_stop = mv_port_stop,
Brett Russ20f733e72005-09-01 18:26:17 -0400581};
582
Jeff Garzike4e7b892006-01-31 12:18:41 -0500583static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500584 .tf_load = ata_tf_load,
585 .tf_read = ata_tf_read,
586 .check_status = ata_check_status,
587 .exec_command = ata_exec_command,
588 .dev_select = ata_std_dev_select,
589
Jeff Garzikcffacd82007-03-09 09:46:47 -0500590 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500591
592 .qc_prep = mv_qc_prep_iie,
593 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900594 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500595
Jeff Garzike4e7b892006-01-31 12:18:41 -0500596 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900597 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500598
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400599 .error_handler = mv_error_handler,
600 .post_internal_cmd = mv_post_int_cmd,
601 .freeze = mv_eh_freeze,
602 .thaw = mv_eh_thaw,
603
Jeff Garzike4e7b892006-01-31 12:18:41 -0500604 .scr_read = mv_scr_read,
605 .scr_write = mv_scr_write,
606
607 .port_start = mv_port_start,
608 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500609};
610
/*
 * Per-chip-family port settings.  Entries are indexed by the chip_xxx
 * board id (also used as the driver_data in mv_pci_tbl below), so the
 * order here must match that enumeration.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,	/* Gen-IIE ops */
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,	/* Gen-IIE ops */
	},
};
656
/* PCI ids handled by this driver; driver_data is the chip_xxx board id. */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
686
/* PCI driver glue: probe via mv_init_one, teardown via generic libata. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
693
/* Low-level hardware ops for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
702
/* Low-level hardware ops for Gen-II/IIE (60xx/7042) chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
711
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


Jeff Garzikd88184f2007-02-26 01:26:06 -0500718/* move to PCI layer or libata core? */
719static int pci_go_64(struct pci_dev *pdev)
720{
721 int rc;
722
723 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
724 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
725 if (rc) {
726 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
727 if (rc) {
728 dev_printk(KERN_ERR, &pdev->dev,
729 "64-bit DMA enable failed\n");
730 return rc;
731 }
732 }
733 } else {
734 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
735 if (rc) {
736 dev_printk(KERN_ERR, &pdev->dev,
737 "32-bit DMA enable failed\n");
738 return rc;
739 }
740 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
741 if (rc) {
742 dev_printk(KERN_ERR, &pdev->dev,
743 "32-bit consistent DMA enable failed\n");
744 return rc;
745 }
746 }
747
748 return rc;
749}
750
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500751/*
Brett Russ20f733e72005-09-01 18:26:17 -0400752 * Functions
753 */
754
/*
 * writelfl - write a register, then read it back to flush the write
 * past any PCI posting buffers before continuing.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
760
Brett Russ20f733e72005-09-01 18:26:17 -0400761static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
762{
763 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
764}
765
/* Map a chip-wide port number to its host controller (HC) index. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
770
/* Map a chip-wide port number to its index within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
775
/* Return the MMIO base of the host controller that owns @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
781
/*
 * Return the MMIO base of @port's register block: the owning HC's base,
 * past the arbiter registers, plus the per-port stride within the HC.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
788
/* Return the MMIO register base for the given libata port. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
793
Jeff Garzikcca39742006-08-24 03:19:22 -0400794static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e72005-09-01 18:26:17 -0400795{
Jeff Garzikcca39742006-08-24 03:19:22 -0400796 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e72005-09-01 18:26:17 -0400797}
798
/*
 * Intentionally empty irq_clear hook.
 * NOTE(review): presumably interrupt-cause clearing is done in this
 * driver's own interrupt paths instead — confirm against the handler.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
802
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue registers
 * @port_mmio: port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and soft indices
 *
 * Writes the CRQB/CRPB base addresses and in/out pointers so the
 * hardware queue state matches the driver's software indices.
 * Chips with the XX42A0 erratum need the low base bits folded into
 * the affected pointer registers as well.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* request queue base must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256B aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
842
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port register base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to run; ATA_PROT_NCQ
 *	selects NCQ mode, anything else selects non-NCQ EDMA
 *
 * If EDMA is already running in the wrong (NCQ vs non-NCQ) mode it is
 * stopped first, then restarted with the desired configuration.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		/* mode mismatch: bring EDMA down so it can be reconfigured */
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		/* program EDMA config for the requested (NCQ or not) mode */
		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		/* sync hardware queue pointers with software indices */
		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
895
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Requests EDMA disable (the disable bit self-clears) and polls for
 * up to ~100ms until the engine reports stopped.
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * Returns 0 on success, -EIO if the engine fails to stop.
 *
 * LOCKING:
 * Inherited from caller (must hold the host lock; see mv_stop_dma).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* soft state says stopped; check hardware agrees */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
938
/*
 * mv_stop_dma - locked wrapper around __mv_stop_dma()
 * @ap: ATA channel to manipulate
 *
 * Takes the host lock around the EDMA stop for callers that do not
 * already hold it.  Returns __mv_stop_dma()'s result.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
950
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space starting at @start,
 * four 32-bit words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
965
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four dwords per line.  Compiles to an empty body without ATA_DEBUG. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* read failures are ignored; this is best-effort */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config, chip-global, HC, EDMA and SATA registers.
 * A negative @port means "all ports/HCs"; otherwise only @port's HC and
 * port registers are dumped.  No-op without ATA_DEBUG. */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1026
Brett Russ20f733e72005-09-01 18:26:17 -04001027static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1028{
1029 unsigned int ofs;
1030
1031 switch (sc_reg_in) {
1032 case SCR_STATUS:
1033 case SCR_CONTROL:
1034 case SCR_ERROR:
1035 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1036 break;
1037 case SCR_ACTIVE:
1038 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1039 break;
1040 default:
1041 ofs = 0xffffffffU;
1042 break;
1043 }
1044 return ofs;
1045}
1046
Tejun Heoda3dbb12007-07-16 14:29:40 +09001047static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e72005-09-01 18:26:17 -04001048{
1049 unsigned int ofs = mv_scr_offset(sc_reg_in);
1050
Tejun Heoda3dbb12007-07-16 14:29:40 +09001051 if (ofs != 0xffffffffU) {
1052 *val = readl(mv_ap_base(ap) + ofs);
1053 return 0;
1054 } else
1055 return -EINVAL;
Brett Russ20f733e72005-09-01 18:26:17 -04001056}
1057
Tejun Heoda3dbb12007-07-16 14:29:40 +09001058static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e72005-09-01 18:26:17 -04001059{
1060 unsigned int ofs = mv_scr_offset(sc_reg_in);
1061
Tejun Heoda3dbb12007-07-16 14:29:40 +09001062 if (ofs != 0xffffffffU) {
Brett Russ20f733e72005-09-01 18:26:17 -04001063 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001064 return 0;
1065 } else
1066 return -EINVAL;
Brett Russ20f733e72005-09-01 18:26:17 -04001067}
1068
Mark Lordf2738272008-01-26 18:32:29 -05001069static void mv6_dev_config(struct ata_device *adev)
1070{
1071 /*
1072 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1073 * See mv_qc_prep() for more info.
1074 */
1075 if (adev->flags & ATA_DFLAG_NCQ)
1076 if (adev->max_sectors > ATA_MAX_SECTORS)
1077 adev->max_sectors = ATA_MAX_SECTORS;
1078}
1079
Mark Lord72109162008-01-26 18:31:33 -05001080static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1081 void __iomem *port_mmio, int want_ncq)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001082{
Mark Lord0c589122008-01-26 18:31:16 -05001083 u32 cfg;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001084
1085 /* set up non-NCQ EDMA configuration */
Mark Lord0c589122008-01-26 18:31:16 -05001086 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001087
Mark Lord0c589122008-01-26 18:31:16 -05001088 if (IS_GEN_I(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001089 cfg |= (1 << 8); /* enab config burst size mask */
1090
Mark Lord0c589122008-01-26 18:31:16 -05001091 else if (IS_GEN_II(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001092 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1093
1094 else if (IS_GEN_IIE(hpriv)) {
Jeff Garzike728eab2007-02-25 02:53:41 -05001095 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1096 cfg |= (1 << 22); /* enab 4-entry host queue cache */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001097 cfg |= (1 << 18); /* enab early completion */
Jeff Garzike728eab2007-02-25 02:53:41 -05001098 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001099 }
1100
Mark Lord72109162008-01-26 18:31:33 -05001101 if (want_ncq) {
1102 cfg |= EDMA_CFG_NCQ;
1103 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1104 } else
1105 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1106
Jeff Garzike4e7b892006-01-31 12:18:41 -05001107 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1108}
1109
Mark Lordda2fa9b2008-01-26 18:32:45 -05001110static void mv_port_free_dma_mem(struct ata_port *ap)
1111{
1112 struct mv_host_priv *hpriv = ap->host->private_data;
1113 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001114 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001115
1116 if (pp->crqb) {
1117 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1118 pp->crqb = NULL;
1119 }
1120 if (pp->crpb) {
1121 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1122 pp->crpb = NULL;
1123 }
Mark Lordeb73d552008-01-29 13:24:00 -05001124 /*
1125 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1126 * For later hardware, we have one unique sg_tbl per NCQ tag.
1127 */
1128 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1129 if (pp->sg_tbl[tag]) {
1130 if (tag == 0 || !IS_GEN_I(hpriv))
1131 dma_pool_free(hpriv->sg_tbl_pool,
1132 pp->sg_tbl[tag],
1133 pp->sg_tbl_dma[tag]);
1134 pp->sg_tbl[tag] = NULL;
1135 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001136 }
1137}
1138
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (partially
 * allocated pools are released via mv_port_free_dma_mem), or the
 * error from ata_pad_alloc().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	/* devm-managed, so pp itself needs no explicit free on error */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: alias every tag to the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* program EDMA config and queue pointers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1210
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	/* release CRQB/CRPB/SG pool allocations made in mv_port_start() */
	mv_port_free_dma_mem(ap);
}
1225
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each ePRD entry can
 * describe at most 64KB and must not cross a 64KB boundary, so large
 * or misaligned segments are split into multiple entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each NCQ tag has its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip so this entry stays within its 64KB page */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the hardware knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1269
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001270static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001271{
Mark Lord559eeda2006-05-19 16:40:15 -04001272 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001273 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001274 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001275}
1276
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* non-DMA protocols are handled by the generic libata paths */
	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB at the per-tag ePRD (SG) table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining shadow registers, in the order the EDMA expects;
	 * the final (command) write carries the "last" marker */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1367
1368/**
1369 * mv_qc_prep_iie - Host specific command preparation.
1370 * @qc: queued command to prepare
1371 *
1372 * This routine simply redirects to the general purpose routine
1373 * if command is not DMA. Else, it handles prep of the CRQB
1374 * (command request block), does some sanity checking, and calls
1375 * the SG load routine.
1376 *
1377 * LOCKING:
1378 * Inherited from caller.
1379 */
1380static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1381{
1382 struct ata_port *ap = qc->ap;
1383 struct mv_port_priv *pp = ap->private_data;
1384 struct mv_crqb_iie *crqb;
1385 struct ata_taskfile *tf;
Mark Lorda6432432006-05-19 16:36:36 -04001386 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001387 u32 flags = 0;
1388
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001389 if (qc->tf.protocol != ATA_PROT_DMA)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001390 return;
1391
Jeff Garzike4e7b892006-01-31 12:18:41 -05001392 /* Fill in Gen IIE command request block
1393 */
1394 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1395 flags |= CRQB_FLAG_READ;
1396
Tejun Heobeec7db2006-02-11 19:11:13 +09001397 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001398 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lord8c0aeb42008-01-26 18:31:48 -05001399 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001400
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001401 /* get current queue index from software */
1402 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Mark Lorda6432432006-05-19 16:36:36 -04001403
1404 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Mark Lordeb73d552008-01-29 13:24:00 -05001405 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1406 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001407 crqb->flags = cpu_to_le32(flags);
1408
1409 tf = &qc->tf;
1410 crqb->ata_cmd[0] = cpu_to_le32(
1411 (tf->command << 16) |
1412 (tf->feature << 24)
1413 );
1414 crqb->ata_cmd[1] = cpu_to_le32(
1415 (tf->lbal << 0) |
1416 (tf->lbam << 8) |
1417 (tf->lbah << 16) |
1418 (tf->device << 24)
1419 );
1420 crqb->ata_cmd[2] = cpu_to_le32(
1421 (tf->hob_lbal << 0) |
1422 (tf->hob_lbam << 8) |
1423 (tf->hob_lbah << 16) |
1424 (tf->hob_feature << 24)
1425 );
1426 crqb->ata_cmd[3] = cpu_to_le32(
1427 (tf->nsect << 0) |
1428 (tf->hob_nsect << 8)
1429 );
1430
1431 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1432 return;
Brett Russ31961942005-09-30 01:36:00 -04001433 mv_fill_sg(qc);
1434}
1435
Brett Russ05b308e2005-10-05 17:08:53 -04001436/**
1437 * mv_qc_issue - Initiate a command to the host
1438 * @qc: queued command to start
1439 *
1440 * This routine simply redirects to the general purpose routine
1441 * if command is not DMA. Else, it sanity checks our local
1442 * caches of the request producer/consumer indices then enables
1443 * DMA and bumps the request producer index.
1444 *
1445 * LOCKING:
1446 * Inherited from caller.
1447 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001448static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001449{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001450 struct ata_port *ap = qc->ap;
1451 void __iomem *port_mmio = mv_ap_base(ap);
1452 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001453 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001454
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001455 if (qc->tf.protocol != ATA_PROT_DMA) {
Brett Russ31961942005-09-30 01:36:00 -04001456 /* We're about to send a non-EDMA capable command to the
1457 * port. Turn off EDMA so there won't be problems accessing
1458 * shadow block, etc registers.
1459 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001460 __mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001461 return ata_qc_issue_prot(qc);
1462 }
1463
Mark Lord72109162008-01-26 18:31:33 -05001464 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001465
1466 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001467
Brett Russ31961942005-09-30 01:36:00 -04001468 /* until we do queuing, the queue should be empty at this point */
Mark Lorda6432432006-05-19 16:36:36 -04001469 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1470 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
Brett Russ31961942005-09-30 01:36:00 -04001471
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001472 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001473
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001474 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001475
1476 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001477 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1478 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001479
1480 return 0;
1481}
1482
Brett Russ05b308e2005-10-05 17:08:53 -04001483/**
Brett Russ05b308e2005-10-05 17:08:53 -04001484 * mv_err_intr - Handle error interrupts on the port
1485 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001486 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001487 *
1488 * In most cases, just clear the interrupt and move on. However,
1489 * some cases require an eDMA reset, which is done right before
1490 * the COMRESET in mv_phy_reset(). The SERR case requires a
1491 * clear of pending errors in the SATA SERROR register. Finally,
1492 * if the port disabled DMA, update our cached copy to match.
1493 *
1494 * LOCKING:
1495 * Inherited from caller.
1496 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001497static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e72005-09-01 18:26:17 -04001498{
Brett Russ31961942005-09-30 01:36:00 -04001499 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001500 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1501 struct mv_port_priv *pp = ap->private_data;
1502 struct mv_host_priv *hpriv = ap->host->private_data;
1503 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1504 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001505 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e72005-09-01 18:26:17 -04001506
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001507 ata_ehi_clear_desc(ehi);
Brett Russ20f733e72005-09-01 18:26:17 -04001508
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001509 if (!edma_enabled) {
1510 /* just a guess: do we need to do this? should we
1511 * expand this, and do it in all cases?
1512 */
Tejun Heo936fd732007-08-06 18:36:23 +09001513 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1514 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e72005-09-01 18:26:17 -04001515 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001516
1517 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1518
1519 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1520
1521 /*
1522 * all generations share these EDMA error cause bits
1523 */
1524
1525 if (edma_err_cause & EDMA_ERR_DEV)
1526 err_mask |= AC_ERR_DEV;
1527 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001528 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001529 EDMA_ERR_INTRL_PAR)) {
1530 err_mask |= AC_ERR_ATA_BUS;
1531 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001532 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001533 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001534 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1535 ata_ehi_hotplugged(ehi);
1536 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001537 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001538 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001539 }
1540
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001541 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001542 eh_freeze_mask = EDMA_EH_FREEZE_5;
1543
1544 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1545 struct mv_port_priv *pp = ap->private_data;
1546 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001547 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001548 }
1549 } else {
1550 eh_freeze_mask = EDMA_EH_FREEZE;
1551
1552 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1553 struct mv_port_priv *pp = ap->private_data;
1554 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001555 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001556 }
1557
1558 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001559 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1560 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001561 err_mask = AC_ERR_ATA_BUS;
1562 action |= ATA_EH_HARDRESET;
1563 }
1564 }
Brett Russ20f733e72005-09-01 18:26:17 -04001565
1566 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001567 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04001568
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001569 if (!err_mask) {
1570 err_mask = AC_ERR_OTHER;
1571 action |= ATA_EH_HARDRESET;
1572 }
1573
1574 ehi->serror |= serr;
1575 ehi->action |= action;
1576
1577 if (qc)
1578 qc->err_mask |= err_mask;
1579 else
1580 ehi->err_mask |= err_mask;
1581
1582 if (edma_err_cause & eh_freeze_mask)
1583 ata_port_freeze(ap);
1584 else
1585 ata_port_abort(ap);
1586}
1587
1588static void mv_intr_pio(struct ata_port *ap)
1589{
1590 struct ata_queued_cmd *qc;
1591 u8 ata_status;
1592
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1596 return;
1597
1598 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001600 if (unlikely(!qc)) /* no active tag */
1601 return;
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1603 return;
1604
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1608}
1609
1610static void mv_intr_edma(struct ata_port *ap)
1611{
1612 void __iomem *port_mmio = mv_ap_base(ap);
1613 struct mv_host_priv *hpriv = ap->host->private_data;
1614 struct mv_port_priv *pp = ap->private_data;
1615 struct ata_queued_cmd *qc;
1616 u32 out_index, in_index;
1617 bool work_done = false;
1618
1619 /* get h/w response queue pointer */
1620 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1621 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1622
1623 while (1) {
1624 u16 status;
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001625 unsigned int tag;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001626
1627 /* get s/w response queue last-read pointer, and compare */
1628 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1629 if (in_index == out_index)
1630 break;
1631
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001632 /* 50xx: get active ATA command */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001633 if (IS_GEN_I(hpriv))
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001634 tag = ap->link.active_tag;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001635
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001636 /* Gen II/IIE: get active ATA command via tag, to enable
1637 * support for queueing. this works transparently for
1638 * queued and non-queued modes.
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001639 */
Mark Lord8c0aeb42008-01-26 18:31:48 -05001640 else
1641 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001642
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001643 qc = ata_qc_from_tag(ap, tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001644
Mark Lordcb924412008-01-26 18:32:09 -05001645 /* For non-NCQ mode, the lower 8 bits of status
1646 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1647 * which should be zero if all went well.
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001648 */
1649 status = le16_to_cpu(pp->crpb[out_index].flags);
Mark Lordcb924412008-01-26 18:32:09 -05001650 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001651 mv_err_intr(ap, qc);
1652 return;
1653 }
1654
1655 /* and finally, complete the ATA command */
1656 if (qc) {
1657 qc->err_mask |=
1658 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1659 ata_qc_complete(qc);
1660 }
1661
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001662 /* advance software response queue pointer, to
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001663 * indicate (after the loop completes) to hardware
1664 * that we have consumed a response queue entry.
1665 */
1666 work_done = true;
1667 pp->resp_idx++;
1668 }
1669
1670 if (work_done)
1671 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1672 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1673 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04001674}
1675
Brett Russ05b308e2005-10-05 17:08:53 -04001676/**
1677 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001678 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001679 * @relevant: port error bits relevant to this host controller
1680 * @hc: which host controller we're to look at
1681 *
1682 * Read then write clear the HC interrupt status then walk each
1683 * port connected to the HC and see if it needs servicing. Port
1684 * success ints are reported in the HC interrupt status reg, the
1685 * port error ints are reported in the higher level main
1686 * interrupt status register and thus are passed in via the
1687 * 'relevant' argument.
1688 *
1689 * LOCKING:
1690 * Inherited from caller.
1691 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001692static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e72005-09-01 18:26:17 -04001693{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001694 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e72005-09-01 18:26:17 -04001695 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e72005-09-01 18:26:17 -04001696 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001697 int port, port0;
Brett Russ20f733e72005-09-01 18:26:17 -04001698
Jeff Garzik35177262007-02-24 21:26:42 -05001699 if (hc == 0)
Brett Russ20f733e72005-09-01 18:26:17 -04001700 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001701 else
Brett Russ20f733e72005-09-01 18:26:17 -04001702 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e72005-09-01 18:26:17 -04001703
1704 /* we'll need the HC success int register in most cases */
1705 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001706 if (!hc_irq_cause)
1707 return;
1708
1709 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04001710
1711 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001712 hc, relevant, hc_irq_cause);
Brett Russ20f733e72005-09-01 18:26:17 -04001713
1714 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001715 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001716 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001717 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001718
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001719 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001720 continue;
1721
Brett Russ31961942005-09-30 01:36:00 -04001722 shift = port << 1; /* (port * 2) */
Brett Russ20f733e72005-09-01 18:26:17 -04001723 if (port >= MV_PORTS_PER_HC) {
1724 shift++; /* skip bit 8 in the HC Main IRQ reg */
1725 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001726 have_err_bits = ((PORT0_ERR << shift) & relevant);
1727
1728 if (unlikely(have_err_bits)) {
1729 struct ata_queued_cmd *qc;
1730
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001731 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001732 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1733 continue;
1734
1735 mv_err_intr(ap, qc);
1736 continue;
Brett Russ20f733e72005-09-01 18:26:17 -04001737 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001738
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001739 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1740
1741 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1742 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1743 mv_intr_edma(ap);
1744 } else {
1745 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1746 mv_intr_pio(ap);
Brett Russ20f733e72005-09-01 18:26:17 -04001747 }
1748 }
1749 VPRINTK("EXIT\n");
1750}
1751
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001752static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1753{
Mark Lord02a121d2007-12-01 13:07:22 -05001754 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001755 struct ata_port *ap;
1756 struct ata_queued_cmd *qc;
1757 struct ata_eh_info *ehi;
1758 unsigned int i, err_mask, printed = 0;
1759 u32 err_cause;
1760
Mark Lord02a121d2007-12-01 13:07:22 -05001761 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001762
1763 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1764 err_cause);
1765
1766 DPRINTK("All regs @ PCI error\n");
1767 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1768
Mark Lord02a121d2007-12-01 13:07:22 -05001769 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001770
1771 for (i = 0; i < host->n_ports; i++) {
1772 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001773 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001774 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001775 ata_ehi_clear_desc(ehi);
1776 if (!printed++)
1777 ata_ehi_push_desc(ehi,
1778 "PCI err cause 0x%08x", err_cause);
1779 err_mask = AC_ERR_HOST_BUS;
1780 ehi->action = ATA_EH_HARDRESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001781 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001782 if (qc)
1783 qc->err_mask |= err_mask;
1784 else
1785 ehi->err_mask |= err_mask;
1786
1787 ata_port_freeze(ap);
1788 }
1789 }
1790}
1791
Brett Russ05b308e2005-10-05 17:08:53 -04001792/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001793 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001794 * @irq: unused
1795 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001796 *
1797 * Read the read only register to determine if any host
1798 * controllers have pending interrupts. If so, call lower level
1799 * routine to handle. Also check for PCI errors which are only
1800 * reported here.
1801 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001802 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001803 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001804 * interrupts.
1805 */
David Howells7d12e782006-10-05 14:55:46 +01001806static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e72005-09-01 18:26:17 -04001807{
Jeff Garzikcca39742006-08-24 03:19:22 -04001808 struct ata_host *host = dev_instance;
Brett Russ20f733e72005-09-01 18:26:17 -04001809 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001810 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Mark Lord646a4da2008-01-26 18:30:37 -05001811 u32 irq_stat, irq_mask;
Brett Russ20f733e72005-09-01 18:26:17 -04001812
Mark Lord646a4da2008-01-26 18:30:37 -05001813 spin_lock(&host->lock);
Brett Russ20f733e72005-09-01 18:26:17 -04001814 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Mark Lord646a4da2008-01-26 18:30:37 -05001815 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04001816
1817 /* check the cases where we either have nothing pending or have read
1818 * a bogus register value which can indicate HW removal or PCI fault
1819 */
Mark Lord646a4da2008-01-26 18:30:37 -05001820 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1821 goto out_unlock;
Brett Russ20f733e72005-09-01 18:26:17 -04001822
Jeff Garzikcca39742006-08-24 03:19:22 -04001823 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e72005-09-01 18:26:17 -04001824
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001825 if (unlikely(irq_stat & PCI_ERR)) {
1826 mv_pci_error(host, mmio);
1827 handled = 1;
1828 goto out_unlock; /* skip all other HC irq handling */
1829 }
1830
Brett Russ20f733e72005-09-01 18:26:17 -04001831 for (hc = 0; hc < n_hcs; hc++) {
1832 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1833 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001834 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001835 handled = 1;
Brett Russ20f733e72005-09-01 18:26:17 -04001836 }
1837 }
Mark Lord615ab952006-05-19 16:24:56 -04001838
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001839out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001840 spin_unlock(&host->lock);
Brett Russ20f733e72005-09-01 18:26:17 -04001841
1842 return IRQ_RETVAL(handled);
1843}
1844
Jeff Garzikc9d39132005-11-13 17:47:51 -05001845static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1846{
1847 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1848 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1849
1850 return hc_mmio + ofs;
1851}
1852
1853static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1854{
1855 unsigned int ofs;
1856
1857 switch (sc_reg_in) {
1858 case SCR_STATUS:
1859 case SCR_ERROR:
1860 case SCR_CONTROL:
1861 ofs = sc_reg_in * sizeof(u32);
1862 break;
1863 default:
1864 ofs = 0xffffffffU;
1865 break;
1866 }
1867 return ofs;
1868}
1869
Tejun Heoda3dbb12007-07-16 14:29:40 +09001870static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001872 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1873 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001874 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1875
Tejun Heoda3dbb12007-07-16 14:29:40 +09001876 if (ofs != 0xffffffffU) {
1877 *val = readl(addr + ofs);
1878 return 0;
1879 } else
1880 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001881}
1882
Tejun Heoda3dbb12007-07-16 14:29:40 +09001883static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001884{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001885 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1886 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001887 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1888
Tejun Heoda3dbb12007-07-16 14:29:40 +09001889 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001890 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001891 return 0;
1892 } else
1893 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001894}
1895
Jeff Garzik522479f2005-11-12 22:14:02 -05001896static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1897{
Jeff Garzik522479f2005-11-12 22:14:02 -05001898 int early_5080;
1899
Auke Kok44c10132007-06-08 15:46:36 -07001900 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001901
1902 if (!early_5080) {
1903 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1904 tmp |= (1 << 0);
1905 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1906 }
1907
1908 mv_reset_pci_bus(pdev, mmio);
1909}
1910
1911static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1912{
1913 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1914}
1915
Jeff Garzik47c2b672005-11-12 21:13:17 -05001916static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001917 void __iomem *mmio)
1918{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001919 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1920 u32 tmp;
1921
1922 tmp = readl(phy_mmio + MV5_PHY_MODE);
1923
1924 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1925 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001926}
1927
Jeff Garzik47c2b672005-11-12 21:13:17 -05001928static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001929{
Jeff Garzik522479f2005-11-12 22:14:02 -05001930 u32 tmp;
1931
1932 writel(0, mmio + MV_GPIO_PORT_CTL);
1933
1934 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1935
1936 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1937 tmp |= ~(1 << 0);
1938 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001939}
1940
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001941static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1942 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001943{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001944 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1945 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1946 u32 tmp;
1947 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1948
1949 if (fix_apm_sq) {
1950 tmp = readl(phy_mmio + MV5_LT_MODE);
1951 tmp |= (1 << 19);
1952 writel(tmp, phy_mmio + MV5_LT_MODE);
1953
1954 tmp = readl(phy_mmio + MV5_PHY_CTL);
1955 tmp &= ~0x3;
1956 tmp |= 0x1;
1957 writel(tmp, phy_mmio + MV5_PHY_CTL);
1958 }
1959
1960 tmp = readl(phy_mmio + MV5_PHY_MODE);
1961 tmp &= ~mask;
1962 tmp |= hpriv->signal[port].pre;
1963 tmp |= hpriv->signal[port].amps;
1964 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001965}
1966
Jeff Garzikc9d39132005-11-13 17:47:51 -05001967
1968#undef ZERO
1969#define ZERO(reg) writel(0, port_mmio + (reg))
1970static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1971 unsigned int port)
Jeff Garzik47c2b672005-11-12 21:13:17 -05001972{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001973 void __iomem *port_mmio = mv_port_base(mmio, port);
1974
1975 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1976
1977 mv_channel_reset(hpriv, mmio, port);
1978
1979 ZERO(0x028); /* command */
1980 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1981 ZERO(0x004); /* timer */
1982 ZERO(0x008); /* irq err cause */
1983 ZERO(0x00c); /* irq err mask */
1984 ZERO(0x010); /* rq bah */
1985 ZERO(0x014); /* rq inp */
1986 ZERO(0x018); /* rq outp */
1987 ZERO(0x01c); /* respq bah */
1988 ZERO(0x024); /* respq outp */
1989 ZERO(0x020); /* respq inp */
1990 ZERO(0x02c); /* test control */
1991 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1992}
1993#undef ZERO
1994
1995#define ZERO(reg) writel(0, hc_mmio + (reg))
1996static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1997 unsigned int hc)
1998{
1999 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2000 u32 tmp;
2001
2002 ZERO(0x00c);
2003 ZERO(0x010);
2004 ZERO(0x014);
2005 ZERO(0x018);
2006
2007 tmp = readl(hc_mmio + 0x20);
2008 tmp &= 0x1c1c1c1c;
2009 tmp |= 0x03030303;
2010 writel(tmp, hc_mmio + 0x20);
2011}
2012#undef ZERO
2013
2014static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2015 unsigned int n_hc)
2016{
2017 unsigned int hc, port;
2018
2019 for (hc = 0; hc < n_hc; hc++) {
2020 for (port = 0; port < MV_PORTS_PER_HC; port++)
2021 mv5_reset_hc_port(hpriv, mmio,
2022 (hc * MV_PORTS_PER_HC) + port);
2023
2024 mv5_reset_one_hc(hpriv, mmio, hc);
2025 }
2026
2027 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002028}
2029
Jeff Garzik101ffae2005-11-12 22:17:49 -05002030#undef ZERO
2031#define ZERO(reg) writel(0, mmio + (reg))
2032static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
2033{
Mark Lord02a121d2007-12-01 13:07:22 -05002034 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2035 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002036 u32 tmp;
2037
2038 tmp = readl(mmio + MV_PCI_MODE);
2039 tmp &= 0xff00ffff;
2040 writel(tmp, mmio + MV_PCI_MODE);
2041
2042 ZERO(MV_PCI_DISC_TIMER);
2043 ZERO(MV_PCI_MSI_TRIGGER);
2044 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2045 ZERO(HC_MAIN_IRQ_MASK_OFS);
2046 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002047 ZERO(hpriv->irq_cause_ofs);
2048 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002049 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2050 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2051 ZERO(MV_PCI_ERR_ATTRIBUTE);
2052 ZERO(MV_PCI_ERR_COMMAND);
2053}
2054#undef ZERO
2055
2056static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2057{
2058 u32 tmp;
2059
2060 mv5_reset_flash(hpriv, mmio);
2061
2062 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2063 tmp &= 0x3;
2064 tmp |= (1 << 5) | (1 << 6);
2065 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2066}
2067
2068/**
2069 * mv6_reset_hc - Perform the 6xxx global soft reset
2070 * @mmio: base address of the HBA
2071 *
2072 * This routine only applies to 6xxx parts.
2073 *
2074 * LOCKING:
2075 * Inherited from caller.
2076 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002077static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2078 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002079{
2080 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2081 int i, rc = 0;
2082 u32 t;
2083
2084 /* Following procedure defined in PCI "main command and status
2085 * register" table.
2086 */
2087 t = readl(reg);
2088 writel(t | STOP_PCI_MASTER, reg);
2089
2090 for (i = 0; i < 1000; i++) {
2091 udelay(1);
2092 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002093 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002094 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002095 }
2096 if (!(PCI_MASTER_EMPTY & t)) {
2097 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2098 rc = 1;
2099 goto done;
2100 }
2101
2102 /* set reset */
2103 i = 5;
2104 do {
2105 writel(t | GLOB_SFT_RST, reg);
2106 t = readl(reg);
2107 udelay(1);
2108 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2109
2110 if (!(GLOB_SFT_RST & t)) {
2111 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2112 rc = 1;
2113 goto done;
2114 }
2115
2116 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2117 i = 5;
2118 do {
2119 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2120 t = readl(reg);
2121 udelay(1);
2122 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2123
2124 if (GLOB_SFT_RST & t) {
2125 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2126 rc = 1;
2127 }
2128done:
2129 return rc;
2130}
2131
Jeff Garzik47c2b672005-11-12 21:13:17 -05002132static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002133 void __iomem *mmio)
2134{
2135 void __iomem *port_mmio;
2136 u32 tmp;
2137
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002138 tmp = readl(mmio + MV_RESET_CFG);
2139 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002140 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002141 hpriv->signal[idx].pre = 0x1 << 5;
2142 return;
2143 }
2144
2145 port_mmio = mv_port_base(mmio, idx);
2146 tmp = readl(port_mmio + PHY_MODE2);
2147
2148 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2149 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2150}
2151
Jeff Garzik47c2b672005-11-12 21:13:17 -05002152static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002153{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002154 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002155}
2156
Jeff Garzikc9d39132005-11-13 17:47:51 -05002157static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002158 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002159{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002160 void __iomem *port_mmio = mv_port_base(mmio, port);
2161
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002162 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002163 int fix_phy_mode2 =
2164 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002165 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002166 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2167 u32 m2, tmp;
2168
2169 if (fix_phy_mode2) {
2170 m2 = readl(port_mmio + PHY_MODE2);
2171 m2 &= ~(1 << 16);
2172 m2 |= (1 << 31);
2173 writel(m2, port_mmio + PHY_MODE2);
2174
2175 udelay(200);
2176
2177 m2 = readl(port_mmio + PHY_MODE2);
2178 m2 &= ~((1 << 16) | (1 << 31));
2179 writel(m2, port_mmio + PHY_MODE2);
2180
2181 udelay(200);
2182 }
2183
2184 /* who knows what this magic does */
2185 tmp = readl(port_mmio + PHY_MODE3);
2186 tmp &= ~0x7F800000;
2187 tmp |= 0x2A800000;
2188 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002189
2190 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002191 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002192
2193 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002194
2195 if (hp_flags & MV_HP_ERRATA_60X1B2)
2196 tmp = readl(port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002197
2198 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2199
2200 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002201
2202 if (hp_flags & MV_HP_ERRATA_60X1B2)
2203 writel(tmp, port_mmio + 0x310);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002204 }
2205
2206 /* Revert values of pre-emphasis and signal amps to the saved ones */
2207 m2 = readl(port_mmio + PHY_MODE2);
2208
2209 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002210 m2 |= hpriv->signal[port].amps;
2211 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002212 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002213
Jeff Garzike4e7b892006-01-31 12:18:41 -05002214 /* according to mvSata 3.6.1, some IIE values are fixed */
2215 if (IS_GEN_IIE(hpriv)) {
2216 m2 &= ~0xC30FF01F;
2217 m2 |= 0x0000900F;
2218 }
2219
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002220 writel(m2, port_mmio + PHY_MODE2);
2221}
2222
Jeff Garzikc9d39132005-11-13 17:47:51 -05002223static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2224 unsigned int port_no)
Brett Russ20f733e72005-09-01 18:26:17 -04002225{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002226 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e72005-09-01 18:26:17 -04002227
Brett Russ31961942005-09-30 01:36:00 -04002228 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002229
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002230 if (IS_GEN_II(hpriv)) {
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002231 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
Mark Lordeb46d682006-05-19 16:29:21 -04002232 ifctl |= (1 << 7); /* enable gen2i speed */
2233 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002234 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2235 }
2236
Brett Russ20f733e72005-09-01 18:26:17 -04002237 udelay(25); /* allow reset propagation */
2238
2239 /* Spec never mentions clearing the bit. Marvell's driver does
2240 * clear the bit, however.
2241 */
Brett Russ31961942005-09-30 01:36:00 -04002242 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002243
Jeff Garzikc9d39132005-11-13 17:47:51 -05002244 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2245
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002246 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002247 mdelay(1);
2248}
2249
Jeff Garzikc9d39132005-11-13 17:47:51 -05002250/**
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002251 * mv_phy_reset - Perform eDMA reset followed by COMRESET
Jeff Garzikc9d39132005-11-13 17:47:51 -05002252 * @ap: ATA channel to manipulate
2253 *
2254 * Part of this is taken from __sata_phy_reset and modified to
2255 * not sleep since this routine gets called from interrupt level.
2256 *
2257 * LOCKING:
2258 * Inherited from caller. This is coded to safe to call at
2259 * interrupt level, i.e. it does not sleep.
2260 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002261static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2262 unsigned long deadline)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002263{
2264 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikcca39742006-08-24 03:19:22 -04002265 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002266 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik22374672005-11-17 10:59:48 -05002267 int retry = 5;
2268 u32 sstatus;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002269
2270 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002271
Tejun Heoda3dbb12007-07-16 14:29:40 +09002272#ifdef DEBUG
2273 {
2274 u32 sstatus, serror, scontrol;
2275
2276 mv_scr_read(ap, SCR_STATUS, &sstatus);
2277 mv_scr_read(ap, SCR_ERROR, &serror);
2278 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2279 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
Saeed Bishara2d79ab82007-11-27 17:26:08 +02002280 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
Tejun Heoda3dbb12007-07-16 14:29:40 +09002281 }
2282#endif
Brett Russ20f733e72005-09-01 18:26:17 -04002283
Jeff Garzik22374672005-11-17 10:59:48 -05002284 /* Issue COMRESET via SControl */
2285comreset_retry:
Tejun Heo936fd732007-08-06 18:36:23 +09002286 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002287 msleep(1);
Jeff Garzik22374672005-11-17 10:59:48 -05002288
Tejun Heo936fd732007-08-06 18:36:23 +09002289 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002290 msleep(20);
Jeff Garzik22374672005-11-17 10:59:48 -05002291
Brett Russ31961942005-09-30 01:36:00 -04002292 do {
Tejun Heo936fd732007-08-06 18:36:23 +09002293 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
Andres Salomon62f1d0e2006-09-11 08:51:05 -04002294 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
Brett Russ31961942005-09-30 01:36:00 -04002295 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002296
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002297 msleep(1);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002298 } while (time_before(jiffies, deadline));
Brett Russ20f733e72005-09-01 18:26:17 -04002299
Jeff Garzik22374672005-11-17 10:59:48 -05002300 /* work around errata */
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002301 if (IS_GEN_II(hpriv) &&
Jeff Garzik22374672005-11-17 10:59:48 -05002302 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2303 (retry-- > 0))
2304 goto comreset_retry;
Jeff Garzik095fec82005-11-12 09:50:49 -05002305
Tejun Heoda3dbb12007-07-16 14:29:40 +09002306#ifdef DEBUG
2307 {
2308 u32 sstatus, serror, scontrol;
2309
2310 mv_scr_read(ap, SCR_STATUS, &sstatus);
2311 mv_scr_read(ap, SCR_ERROR, &serror);
2312 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2313 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2314 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2315 }
2316#endif
Brett Russ31961942005-09-30 01:36:00 -04002317
Tejun Heo936fd732007-08-06 18:36:23 +09002318 if (ata_link_offline(&ap->link)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002319 *class = ATA_DEV_NONE;
Brett Russ20f733e72005-09-01 18:26:17 -04002320 return;
2321 }
2322
Jeff Garzik22374672005-11-17 10:59:48 -05002323 /* even after SStatus reflects that device is ready,
2324 * it seems to take a while for link to be fully
2325 * established (and thus Status no longer 0x80/0x7F),
2326 * so we poll a bit for that, here.
2327 */
2328 retry = 20;
2329 while (1) {
2330 u8 drv_stat = ata_check_status(ap);
2331 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2332 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002333 msleep(500);
Jeff Garzik22374672005-11-17 10:59:48 -05002334 if (retry-- <= 0)
2335 break;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002336 if (time_after(jiffies, deadline))
2337 break;
Jeff Garzik22374672005-11-17 10:59:48 -05002338 }
2339
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002340 /* FIXME: if we passed the deadline, the following
2341 * code probably produces an invalid result
2342 */
Brett Russ20f733e72005-09-01 18:26:17 -04002343
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002344 /* finally, read device signature from TF registers */
Tejun Heo3f198592007-09-02 23:23:57 +09002345 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
Jeff Garzik095fec82005-11-12 09:50:49 -05002346
2347 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2348
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002349 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
Jeff Garzik095fec82005-11-12 09:50:49 -05002350
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002351 VPRINTK("EXIT\n");
Brett Russ20f733e72005-09-01 18:26:17 -04002352}
2353
Tejun Heocc0680a2007-08-06 18:36:23 +09002354static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002355{
Tejun Heocc0680a2007-08-06 18:36:23 +09002356 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002357 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002358 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002359 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002360
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002361 rc = mv_stop_dma(ap);
2362 if (rc)
2363 ehc->i.action |= ATA_EH_HARDRESET;
2364
2365 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2366 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2367 ehc->i.action |= ATA_EH_HARDRESET;
2368 }
2369
2370 /* if we're about to do hardreset, nothing more to do */
2371 if (ehc->i.action & ATA_EH_HARDRESET)
2372 return 0;
2373
Tejun Heocc0680a2007-08-06 18:36:23 +09002374 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002375 rc = ata_wait_ready(ap, deadline);
2376 else
2377 rc = -ENODEV;
2378
2379 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002380}
2381
Tejun Heocc0680a2007-08-06 18:36:23 +09002382static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002383 unsigned long deadline)
2384{
Tejun Heocc0680a2007-08-06 18:36:23 +09002385 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002386 struct mv_host_priv *hpriv = ap->host->private_data;
2387 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2388
2389 mv_stop_dma(ap);
2390
2391 mv_channel_reset(hpriv, mmio, ap->port_no);
2392
2393 mv_phy_reset(ap, class, deadline);
2394
2395 return 0;
2396}
2397
Tejun Heocc0680a2007-08-06 18:36:23 +09002398static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002399{
Tejun Heocc0680a2007-08-06 18:36:23 +09002400 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002401 u32 serr;
2402
2403 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002404 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002405
2406 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002407 sata_scr_read(link, SCR_ERROR, &serr);
2408 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002409
2410 /* bail out if no device is present */
2411 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2412 DPRINTK("EXIT, no device\n");
2413 return;
2414 }
2415
2416 /* set up device control */
2417 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2418}
2419
/*
 * mv_error_handler - libata error-handler entry point
 * @ap: port needing error handling
 *
 * Runs the standard EH state machine with this driver's prereset,
 * hardreset and postreset hooks; softreset uses the libata default.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2425
/*
 * mv_post_int_cmd - hook run after an internal command completes
 * @qc: the completed queued command
 *
 * Ensures EDMA is stopped on the command's port.
 */
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2430
2431static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e72005-09-01 18:26:17 -04002432{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002433 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002434 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2435 u32 tmp, mask;
2436 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002437
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002438 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002439
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002440 shift = ap->port_no * 2;
2441 if (hc > 0)
2442 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002443
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002444 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002445
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002446 /* disable assertion of portN err, done events */
2447 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2448 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2449}
2450
/*
 * mv_eh_thaw - libata EH thaw hook: re-enable this port's interrupts
 * @ap: port to thaw
 *
 * Mirrors mv_eh_freeze(): clears any EDMA error and pending per-HC
 * IRQ-cause events accumulated while frozen, then sets the port's
 * 2-bit (err, done) field back in the main IRQ mask register.  The
 * causes must be cleared before the mask is re-enabled, otherwise
 * stale events would fire immediately.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	/* ports 0-3 live on HC 0, ports 4-7 on HC 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port number relative to its HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2483
Brett Russ05b308e2005-10-05 17:08:53 -04002484/**
2485 * mv_port_init - Perform some early initialization on a single port.
2486 * @port: libata data structure storing shadow register addresses
2487 * @port_mmio: base address of the port
2488 *
2489 * Initialize shadow register mmio addresses, clear outstanding
2490 * interrupts on the port, and unmask interrupts for the future
2491 * start of the port.
2492 *
2493 * LOCKING:
2494 * Inherited from caller.
2495 */
Brett Russ31961942005-09-30 01:36:00 -04002496static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2497{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002498 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002499 unsigned serr_ofs;
2500
Jeff Garzik8b260242005-11-12 12:32:50 -05002501 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002502 */
2503 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002504 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002505 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2506 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2507 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2508 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2509 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2510 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002511 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002512 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2513 /* special case: control/altstatus doesn't have ATA_REG_ address */
2514 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2515
2516 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002517 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e72005-09-01 18:26:17 -04002518
Brett Russ31961942005-09-30 01:36:00 -04002519 /* Clear any currently outstanding port interrupt conditions */
2520 serr_ofs = mv_scr_offset(SCR_ERROR);
2521 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2522 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2523
Mark Lord646a4da2008-01-26 18:30:37 -05002524 /* unmask all non-transient EDMA error interrupts */
2525 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002526
Jeff Garzik8b260242005-11-12 12:32:50 -05002527 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002528 readl(port_mmio + EDMA_CFG_OFS),
2529 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2530 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e72005-09-01 18:26:17 -04002531}
2532
Tejun Heo4447d352007-04-17 23:44:08 +09002533static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002534{
Tejun Heo4447d352007-04-17 23:44:08 +09002535 struct pci_dev *pdev = to_pci_dev(host->dev);
2536 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002537 u32 hp_flags = hpriv->hp_flags;
2538
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002539 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002540 case chip_5080:
2541 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002542 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002543
Auke Kok44c10132007-06-08 15:46:36 -07002544 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002545 case 0x1:
2546 hp_flags |= MV_HP_ERRATA_50XXB0;
2547 break;
2548 case 0x3:
2549 hp_flags |= MV_HP_ERRATA_50XXB2;
2550 break;
2551 default:
2552 dev_printk(KERN_WARNING, &pdev->dev,
2553 "Applying 50XXB2 workarounds to unknown rev\n");
2554 hp_flags |= MV_HP_ERRATA_50XXB2;
2555 break;
2556 }
2557 break;
2558
2559 case chip_504x:
2560 case chip_508x:
2561 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002562 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002563
Auke Kok44c10132007-06-08 15:46:36 -07002564 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002565 case 0x0:
2566 hp_flags |= MV_HP_ERRATA_50XXB0;
2567 break;
2568 case 0x3:
2569 hp_flags |= MV_HP_ERRATA_50XXB2;
2570 break;
2571 default:
2572 dev_printk(KERN_WARNING, &pdev->dev,
2573 "Applying B2 workarounds to unknown rev\n");
2574 hp_flags |= MV_HP_ERRATA_50XXB2;
2575 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002576 }
2577 break;
2578
2579 case chip_604x:
2580 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002581 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002582 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002583
Auke Kok44c10132007-06-08 15:46:36 -07002584 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002585 case 0x7:
2586 hp_flags |= MV_HP_ERRATA_60X1B2;
2587 break;
2588 case 0x9:
2589 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002590 break;
2591 default:
2592 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002593 "Applying B2 workarounds to unknown rev\n");
2594 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002595 break;
2596 }
2597 break;
2598
Jeff Garzike4e7b892006-01-31 12:18:41 -05002599 case chip_7042:
Mark Lord02a121d2007-12-01 13:07:22 -05002600 hp_flags |= MV_HP_PCIE;
Mark Lord306b30f2007-12-04 14:07:52 -05002601 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2602 (pdev->device == 0x2300 || pdev->device == 0x2310))
2603 {
Mark Lord4e520032007-12-11 12:58:05 -05002604 /*
2605 * Highpoint RocketRAID PCIe 23xx series cards:
2606 *
2607 * Unconfigured drives are treated as "Legacy"
2608 * by the BIOS, and it overwrites sector 8 with
2609 * a "Lgcy" metadata block prior to Linux boot.
2610 *
2611 * Configured drives (RAID or JBOD) leave sector 8
2612 * alone, but instead overwrite a high numbered
2613 * sector for the RAID metadata. This sector can
2614 * be determined exactly, by truncating the physical
2615 * drive capacity to a nice even GB value.
2616 *
2617 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2618 *
2619 * Warn the user, lest they think we're just buggy.
2620 */
2621 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2622 " BIOS CORRUPTS DATA on all attached drives,"
2623 " regardless of if/how they are configured."
2624 " BEWARE!\n");
2625 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2626 " use sectors 8-9 on \"Legacy\" drives,"
2627 " and avoid the final two gigabytes on"
2628 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05002629 }
Jeff Garzike4e7b892006-01-31 12:18:41 -05002630 case chip_6042:
2631 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002632 hp_flags |= MV_HP_GEN_IIE;
2633
Auke Kok44c10132007-06-08 15:46:36 -07002634 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002635 case 0x0:
2636 hp_flags |= MV_HP_ERRATA_XX42A0;
2637 break;
2638 case 0x1:
2639 hp_flags |= MV_HP_ERRATA_60X1C0;
2640 break;
2641 default:
2642 dev_printk(KERN_WARNING, &pdev->dev,
2643 "Applying 60X1C0 workarounds to unknown rev\n");
2644 hp_flags |= MV_HP_ERRATA_60X1C0;
2645 break;
2646 }
2647 break;
2648
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002649 default:
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002650 dev_printk(KERN_ERR, &pdev->dev,
2651 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002652 return 1;
2653 }
2654
2655 hpriv->hp_flags = hp_flags;
Mark Lord02a121d2007-12-01 13:07:22 -05002656 if (hp_flags & MV_HP_PCIE) {
2657 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2658 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2659 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2660 } else {
2661 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2662 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2663 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2664 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002665
2666 return 0;
2667}
2668
Brett Russ05b308e2005-10-05 17:08:53 -04002669/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002670 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002671 * @host: ATA host to initialize
2672 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002673 *
2674 * If possible, do an early global reset of the host. Then do
2675 * our port init and clear/unmask all/relevant host interrupts.
2676 *
2677 * LOCKING:
2678 * Inherited from caller.
2679 */
Tejun Heo4447d352007-04-17 23:44:08 +09002680static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e72005-09-01 18:26:17 -04002681{
2682 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002683 struct pci_dev *pdev = to_pci_dev(host->dev);
2684 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2685 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002686
Jeff Garzik47c2b672005-11-12 21:13:17 -05002687 /* global interrupt mask */
2688 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2689
Tejun Heo4447d352007-04-17 23:44:08 +09002690 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002691 if (rc)
2692 goto done;
2693
Tejun Heo4447d352007-04-17 23:44:08 +09002694 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002695
Tejun Heo4447d352007-04-17 23:44:08 +09002696 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002697 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e72005-09-01 18:26:17 -04002698
Jeff Garzikc9d39132005-11-13 17:47:51 -05002699 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002700 if (rc)
Brett Russ20f733e72005-09-01 18:26:17 -04002701 goto done;
Brett Russ20f733e72005-09-01 18:26:17 -04002702
Jeff Garzik522479f2005-11-12 22:14:02 -05002703 hpriv->ops->reset_flash(hpriv, mmio);
2704 hpriv->ops->reset_bus(pdev, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002705 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e72005-09-01 18:26:17 -04002706
Tejun Heo4447d352007-04-17 23:44:08 +09002707 for (port = 0; port < host->n_ports; port++) {
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002708 if (IS_GEN_II(hpriv)) {
Jeff Garzikc9d39132005-11-13 17:47:51 -05002709 void __iomem *port_mmio = mv_port_base(mmio, port);
2710
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002711 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
Mark Lordeb46d682006-05-19 16:29:21 -04002712 ifctl |= (1 << 7); /* enable gen2i speed */
2713 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002714 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2715 }
2716
Jeff Garzikc9d39132005-11-13 17:47:51 -05002717 hpriv->ops->phy_errata(hpriv, mmio, port);
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002718 }
2719
Tejun Heo4447d352007-04-17 23:44:08 +09002720 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002721 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002722 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002723 unsigned int offset = port_mmio - mmio;
2724
2725 mv_port_init(&ap->ioaddr, port_mmio);
2726
2727 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2728 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
Brett Russ20f733e72005-09-01 18:26:17 -04002729 }
2730
2731 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002732 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2733
2734 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2735 "(before clear)=0x%08x\n", hc,
2736 readl(hc_mmio + HC_CFG_OFS),
2737 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2738
2739 /* Clear any currently outstanding hc interrupt conditions */
2740 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002741 }
2742
Brett Russ31961942005-09-30 01:36:00 -04002743 /* Clear any currently outstanding host interrupt conditions */
Mark Lord02a121d2007-12-01 13:07:22 -05002744 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002745
2746 /* and unmask interrupt generation for host regs */
Mark Lord02a121d2007-12-01 13:07:22 -05002747 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002748
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002749 if (IS_GEN_I(hpriv))
Jeff Garzikfb621e22007-02-25 04:19:45 -05002750 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2751 else
2752 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ20f733e72005-09-01 18:26:17 -04002753
2754 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
Jeff Garzik8b260242005-11-12 12:32:50 -05002755 "PCI int cause/mask=0x%08x/0x%08x\n",
Brett Russ20f733e72005-09-01 18:26:17 -04002756 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2757 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
Mark Lord02a121d2007-12-01 13:07:22 -05002758 readl(mmio + hpriv->irq_cause_ofs),
2759 readl(mmio + hpriv->irq_mask_ofs));
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002760
Brett Russ31961942005-09-30 01:36:00 -04002761done:
Brett Russ20f733e72005-09-01 18:26:17 -04002762 return rc;
2763}
2764
Brett Russ05b308e2005-10-05 17:08:53 -04002765/**
2766 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002767 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002768 *
2769 * FIXME: complete this.
2770 *
2771 * LOCKING:
2772 * Inherited from caller.
2773 */
Tejun Heo4447d352007-04-17 23:44:08 +09002774static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002775{
Tejun Heo4447d352007-04-17 23:44:08 +09002776 struct pci_dev *pdev = to_pci_dev(host->dev);
2777 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002778 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002779 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002780
2781 /* Use this to determine the HW stepping of the chip so we know
2782 * what errata to workaround
2783 */
Brett Russ31961942005-09-30 01:36:00 -04002784 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2785 if (scc == 0)
2786 scc_s = "SCSI";
2787 else if (scc == 0x01)
2788 scc_s = "RAID";
2789 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002790 scc_s = "?";
2791
2792 if (IS_GEN_I(hpriv))
2793 gen = "I";
2794 else if (IS_GEN_II(hpriv))
2795 gen = "II";
2796 else if (IS_GEN_IIE(hpriv))
2797 gen = "IIE";
2798 else
2799 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002800
Jeff Garzika9524a72005-10-30 14:39:11 -05002801 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002802 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2803 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002804 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2805}
2806
Mark Lordda2fa9b2008-01-26 18:32:45 -05002807static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2808{
2809 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2810 MV_CRQB_Q_SZ, 0);
2811 if (!hpriv->crqb_pool)
2812 return -ENOMEM;
2813
2814 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2815 MV_CRPB_Q_SZ, 0);
2816 if (!hpriv->crpb_pool)
2817 return -ENOMEM;
2818
2819 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2820 MV_SG_TBL_SZ, 0);
2821 if (!hpriv->sg_tbl_pool)
2822 return -ENOMEM;
2823
2824 return 0;
2825}
2826
Brett Russ05b308e2005-10-05 17:08:53 -04002827/**
2828 * mv_init_one - handle a positive probe of a Marvell host
2829 * @pdev: PCI device found
2830 * @ent: PCI device ID entry for the matched host
2831 *
2832 * LOCKING:
2833 * Inherited from caller.
2834 */
Brett Russ20f733e72005-09-01 18:26:17 -04002835static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2836{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002837 static int printed_version;
Brett Russ20f733e72005-09-01 18:26:17 -04002838 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002839 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2840 struct ata_host *host;
2841 struct mv_host_priv *hpriv;
2842 int n_ports, rc;
Brett Russ20f733e72005-09-01 18:26:17 -04002843
Jeff Garzika9524a72005-10-30 14:39:11 -05002844 if (!printed_version++)
2845 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e72005-09-01 18:26:17 -04002846
Tejun Heo4447d352007-04-17 23:44:08 +09002847 /* allocate host */
2848 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2849
2850 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2851 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2852 if (!host || !hpriv)
2853 return -ENOMEM;
2854 host->private_data = hpriv;
2855
2856 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09002857 rc = pcim_enable_device(pdev);
2858 if (rc)
Brett Russ20f733e72005-09-01 18:26:17 -04002859 return rc;
Brett Russ20f733e72005-09-01 18:26:17 -04002860
Tejun Heo0d5ff562007-02-01 15:06:36 +09002861 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2862 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002863 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09002864 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002865 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09002866 host->iomap = pcim_iomap_table(pdev);
Brett Russ20f733e72005-09-01 18:26:17 -04002867
Jeff Garzikd88184f2007-02-26 01:26:06 -05002868 rc = pci_go_64(pdev);
2869 if (rc)
2870 return rc;
2871
Mark Lordda2fa9b2008-01-26 18:32:45 -05002872 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2873 if (rc)
2874 return rc;
2875
Brett Russ20f733e72005-09-01 18:26:17 -04002876 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09002877 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09002878 if (rc)
2879 return rc;
Brett Russ20f733e72005-09-01 18:26:17 -04002880
Brett Russ31961942005-09-30 01:36:00 -04002881 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09002882 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04002883 pci_intx(pdev, 1);
Brett Russ20f733e72005-09-01 18:26:17 -04002884
Brett Russ31961942005-09-30 01:36:00 -04002885 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09002886 mv_print_info(host);
Brett Russ20f733e72005-09-01 18:26:17 -04002887
Tejun Heo4447d352007-04-17 23:44:08 +09002888 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04002889 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09002890 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002891 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e72005-09-01 18:26:17 -04002892}
2893
/* Module load entry point: register the PCI driver with the PCI core. */
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}
2898
/* Module unload entry point: unregister the PCI driver. */
static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
2903
/* Module metadata, exposed via modinfo and used for hotplug matching. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* "msi" is defined earlier in the file; 0444 = world-readable in sysfs,
 * not writable after load. */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);