/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IDE_H
#define _IDE_H
/*
 * linux/include/linux/ide.h
 *
 * Copyright (C) 1994-2002 Linus Torvalds & authors
 */

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/ata.h>
#include <linux/blk-mq.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/pm.h>
#include <linux/mutex.h>
/* for request_sense */
#include <linux/cdrom.h>
#include <scsi/scsi_cmnd.h>
#include <asm/byteorder.h>
#include <asm/io.h>

/*
 * Probably not wise to fiddle with these
 */
#define SUPPORT_VLB_SYNC 1
#define IDE_DEFAULT_MAX_FAILURES 1
#define ERROR_MAX	8	/* Max read/write errors per sector */
#define ERROR_RESET	3	/* Reset controller every 4th retry */
#define ERROR_RECAL	1	/* Recalibrate every 2nd retry */

struct device;

/* values for ide_request.type */
enum ata_priv_type {
	ATA_PRIV_MISC,
	ATA_PRIV_TASKFILE,
	ATA_PRIV_PC,
	ATA_PRIV_SENSE,		/* sense request */
	ATA_PRIV_PM_SUSPEND,	/* suspend request */
	ATA_PRIV_PM_RESUME,	/* resume request */
};

struct ide_request {
	struct scsi_request sreq;
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	u8 type;
	void *special;
};

static inline struct ide_request *ide_req(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static inline bool ata_misc_request(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_MISC;
}

static inline bool ata_taskfile_request(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_TASKFILE;
}

static inline bool ata_pc_request(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_PC;
}

static inline bool ata_sense_request(struct request *rq)
{
	return blk_rq_is_private(rq) && ide_req(rq)->type == ATA_PRIV_SENSE;
}

static inline bool ata_pm_request(struct request *rq)
{
	return blk_rq_is_private(rq) &&
		(ide_req(rq)->type == ATA_PRIV_PM_SUSPEND ||
		 ide_req(rq)->type == ATA_PRIV_PM_RESUME);
}
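
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): the per-request IDE data lives in the blk-mq PDU, so a
 * driver that has allocated a private request tags it through ide_req()
 * before queueing it, and the predicates above recognise it later.
 */
static inline void ide_example_tag_taskfile(struct request *rq, void *payload)
{
	ide_req(rq)->type = ATA_PRIV_TASKFILE;	/* checked by ata_taskfile_request() */
	ide_req(rq)->special = payload;		/* driver-private payload */
}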
/* Error codes returned in result to the higher part of the driver. */
enum {
	IDE_DRV_ERROR_GENERAL = 101,
	IDE_DRV_ERROR_FILEMARK = 102,
	IDE_DRV_ERROR_EOD = 103,
};

/*
 * Definitions for accessing IDE controller registers
 */
#define IDE_NR_PORTS (10)

struct ide_io_ports {
	unsigned long data_addr;

	union {
		unsigned long error_addr;	/*  read: error */
		unsigned long feature_addr;	/* write: feature */
	};

	unsigned long nsect_addr;
	unsigned long lbal_addr;
	unsigned long lbam_addr;
	unsigned long lbah_addr;

	unsigned long device_addr;

	union {
		unsigned long status_addr;	/*  read: status  */
		unsigned long command_addr;	/* write: command */
	};

	unsigned long ctl_addr;

	unsigned long irq_addr;
};

#define OK_STAT(stat,good,bad)	(((stat)&((good)|(bad)))==(good))

#define BAD_R_STAT	(ATA_BUSY | ATA_ERR)
#define BAD_W_STAT	(BAD_R_STAT | ATA_DF)
#define BAD_STAT	(BAD_R_STAT | ATA_DRQ)
#define DRIVE_READY	(ATA_DRDY | ATA_DSC)

#define BAD_CRC		(ATA_ABORTED | ATA_ICRC)

#define SATA_NR_PORTS (3)	/* 16 possible ?? */

#define SATA_STATUS_OFFSET	(0)
#define SATA_ERROR_OFFSET	(1)
#define SATA_CONTROL_OFFSET	(2)

/*
 * Our Physical Region Descriptor (PRD) table should be large enough
 * to handle the biggest I/O request we are likely to see. Since requests
 * can have no more than 256 sectors, and since the typical blocksize is
 * two or more sectors, we could get by with a limit of 128 entries here for
 * the usual worst case. Most requests seem to include some contiguous blocks,
 * further reducing the number of table entries required.
 *
 * The driver reverts to PIO mode for individual requests that exceed
 * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
 * 100% of all crazy scenarios here is not necessary.
 *
 * As it turns out though, we must allocate a full 4KB page for this,
 * so the two PRD tables (ide0 & ide1) will each get half of that,
 * allowing each to have about 256 entries (8 bytes each) from this.
 */
#define PRD_BYTES	8
#define PRD_ENTRIES	256

/*
 * Some more useful definitions
 */
#define PARTN_BITS	6	/* number of minor dev bits for partitions */
#define MAX_DRIVES	2	/* per interface; 2 assumed by lots of code */

/*
 * Timeouts for various operations:
 */
enum {
	/* spec allows up to 20ms, but CF cards and SSD drives need more */
	WAIT_DRQ	= 1 * HZ,	/* 1s */
	/* some laptops are very slow */
	WAIT_READY	= 5 * HZ,	/* 5s */
	/* should be less than 3ms (?), if all ATAPI CD is closed at boot */
	WAIT_PIDENTIFY	= 10 * HZ,	/* 10s */
	/* worst case when spinning up */
	WAIT_WORSTCASE	= 30 * HZ,	/* 30s */
	/* maximum wait for an IRQ to happen */
	WAIT_CMD	= 10 * HZ,	/* 10s */
	/* Some drives require a longer IRQ timeout. */
	WAIT_FLOPPY_CMD	= 50 * HZ,	/* 50s */
	/*
	 * Some drives (for example, Seagate STT3401A Travan) require a very
	 * long timeout, because they don't return an interrupt or clear their
	 * BSY bit until after the command completes (even retension commands).
	 */
	WAIT_TAPE_CMD	= 900 * HZ,	/* 900s */
	/* minimum sleep time */
	WAIT_MIN_SLEEP	= HZ / 50,	/* 20ms */
};

/*
 * Op codes for special requests to be handled by ide_special_rq().
 * Values should be in the range of 0x20 to 0x3f.
 */
#define REQ_DRIVE_RESET		0x20
#define REQ_DEVSET_EXEC		0x21
#define REQ_PARK_HEADS		0x22
#define REQ_UNPARK_HEADS	0x23

/*
 * hwif_chipset_t is used to keep track of the specific hardware
 * chipset used by each IDE interface, if known.
 */
enum {	ide_unknown,	ide_generic,	ide_pci,
	ide_cmd640,	ide_dtc2278,	ide_ali14xx,
	ide_qd65xx,	ide_umc8672,	ide_ht6560b,
	ide_4drives,	ide_pmac,	ide_acorn,
	ide_au1xxx,	ide_palm3710
};

typedef u8 hwif_chipset_t;

/*
 * Structure to hold all information about the location of this port
 */
struct ide_hw {
	union {
		struct ide_io_ports io_ports;
		unsigned long io_ports_array[IDE_NR_PORTS];
	};

	int irq;			/* our irq number */
	struct device *dev, *parent;
	unsigned long config;
};

static inline void ide_std_init_ports(struct ide_hw *hw,
				      unsigned long io_addr,
				      unsigned long ctl_addr)
{
	unsigned int i;

	for (i = 0; i <= 7; i++)
		hw->io_ports_array[i] = io_addr++;

	hw->io_ports.ctl_addr = ctl_addr;
}
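
/*
 * Illustrative sketch (not part of the original header): how a legacy host
 * driver might describe one port before registering it.  The 0x1f0/0x3f6
 * addresses and IRQ 14 are the classic primary-channel defaults and are
 * only an assumption here; the helper name is hypothetical.
 */
static inline void ide_example_init_legacy_hw(struct ide_hw *hw)
{
	memset(hw, 0, sizeof(*hw));
	ide_std_init_ports(hw, 0x1f0, 0x3f6);	/* task file block + control reg */
	hw->irq = 14;
	/* a real driver would now hand hw to ide_host_add(), declared below */
}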
#define MAX_HWIFS	10

/*
 * Now for the data we need to maintain per-drive: ide_drive_t
 */
#define ide_scsi	0x21
#define ide_disk	0x20
#define ide_optical	0x7
#define ide_cdrom	0x5
#define ide_tape	0x1
#define ide_floppy	0x0

/*
 * Special Driver Flags
 */
enum {
	IDE_SFLAG_SET_GEOMETRY = BIT(0),
	IDE_SFLAG_RECALIBRATE = BIT(1),
	IDE_SFLAG_SET_MULTMODE = BIT(2),
};

/*
 * Status returned from various ide_ functions
 */
typedef enum {
	ide_stopped,	/* no drive operation was started */
	ide_started,	/* a drive operation was started, handler was set */
} ide_startstop_t;

enum {
	IDE_VALID_ERROR = BIT(1),
	IDE_VALID_FEATURE = IDE_VALID_ERROR,
	IDE_VALID_NSECT = BIT(2),
	IDE_VALID_LBAL = BIT(3),
	IDE_VALID_LBAM = BIT(4),
	IDE_VALID_LBAH = BIT(5),
	IDE_VALID_DEVICE = BIT(6),
	IDE_VALID_LBA = IDE_VALID_LBAL |
			IDE_VALID_LBAM |
			IDE_VALID_LBAH,
	IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
			   IDE_VALID_NSECT |
			   IDE_VALID_LBA,
	IDE_VALID_IN_TF = IDE_VALID_NSECT |
			  IDE_VALID_LBA,
	IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
	IDE_VALID_IN_HOB = IDE_VALID_ERROR |
			   IDE_VALID_NSECT |
			   IDE_VALID_LBA,
};

enum {
	IDE_TFLAG_LBA48 = BIT(0),
	IDE_TFLAG_WRITE = BIT(1),
	IDE_TFLAG_CUSTOM_HANDLER = BIT(2),
	IDE_TFLAG_DMA_PIO_FALLBACK = BIT(3),
	/* force 16-bit I/O operations */
	IDE_TFLAG_IO_16BIT = BIT(4),
	/* struct ide_cmd was allocated using kmalloc() */
	IDE_TFLAG_DYN = BIT(5),
	IDE_TFLAG_FS = BIT(6),
	IDE_TFLAG_MULTI_PIO = BIT(7),
	IDE_TFLAG_SET_XFER = BIT(8),
};

enum {
	IDE_FTFLAG_FLAGGED = BIT(0),
	IDE_FTFLAG_SET_IN_FLAGS = BIT(1),
	IDE_FTFLAG_OUT_DATA = BIT(2),
	IDE_FTFLAG_IN_DATA = BIT(3),
};

struct ide_taskfile {
	u8 data;	/* 0: data byte (for TASKFILE ioctl) */
	union {		/* 1: */
		u8 error;	/*  read: error */
		u8 feature;	/* write: feature */
	};
	u8 nsect;	/* 2: number of sectors */
	u8 lbal;	/* 3: LBA low */
	u8 lbam;	/* 4: LBA mid */
	u8 lbah;	/* 5: LBA high */
	u8 device;	/* 6: device select */
	union {		/* 7: */
		u8 status;	/*  read: status */
		u8 command;	/* write: command */
	};
};

struct ide_cmd {
	struct ide_taskfile tf;
	struct ide_taskfile hob;
	struct {
		struct {
			u8 tf;
			u8 hob;
		} out, in;
	} valid;

	u16 tf_flags;
	u8 ftf_flags;	/* for TASKFILE ioctl */
	int protocol;

	int sg_nents;		/* number of sg entries */
	int orig_sg_nents;
	int sg_dma_direction;	/* DMA transfer direction */

	unsigned int nbytes;
	unsigned int nleft;
	unsigned int last_xfer_len;

	struct scatterlist *cursg;
	unsigned int cursg_ofs;

	struct request *rq;	/* copy of request */
};

/* ATAPI packet command flags */
enum {
	/* set when an error is considered normal - no retry (ide-tape) */
	PC_FLAG_ABORT = BIT(0),
	PC_FLAG_SUPPRESS_ERROR = BIT(1),
	PC_FLAG_WAIT_FOR_DSC = BIT(2),
	PC_FLAG_DMA_OK = BIT(3),
	PC_FLAG_DMA_IN_PROGRESS = BIT(4),
	PC_FLAG_DMA_ERROR = BIT(5),
	PC_FLAG_WRITING = BIT(6),
};

#define ATAPI_WAIT_PC	(60 * HZ)

struct ide_atapi_pc {
	/* actual packet bytes */
	u8 c[12];
	/* incremented on each retry */
	int retries;
	int error;

	/* bytes to transfer */
	int req_xfer;

	/* the corresponding request */
	struct request *rq;

	unsigned long flags;

	/*
	 * those are more or less driver-specific and some of them are subject
	 * to change/removal later.
	 */
	unsigned long timeout;
};

struct ide_devset;
struct ide_driver;

#ifdef CONFIG_BLK_DEV_IDEACPI
struct ide_acpi_drive_link;
struct ide_acpi_hwif_link;
#endif

struct ide_drive_s;

struct ide_disk_ops {
	int (*check)(struct ide_drive_s *, const char *);
	int (*get_capacity)(struct ide_drive_s *);
	void (*unlock_native_capacity)(struct ide_drive_s *);
	void (*setup)(struct ide_drive_s *);
	void (*flush)(struct ide_drive_s *);
	int (*init_media)(struct ide_drive_s *, struct gendisk *);
	int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
			    int);
	ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
				      sector_t);
	int (*ioctl)(struct ide_drive_s *, struct block_device *,
		     fmode_t, unsigned int, unsigned long);
	int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
			    fmode_t, unsigned int, unsigned long);
};

/* ATAPI device flags */
enum {
	IDE_AFLAG_DRQ_INTERRUPT = BIT(0),

	/* ide-cd */
	/* Drive cannot eject the disc. */
	IDE_AFLAG_NO_EJECT = BIT(1),
	/* Drive is a pre ATAPI 1.2 drive. */
	IDE_AFLAG_PRE_ATAPI12 = BIT(2),
	/* TOC addresses are in BCD. */
	IDE_AFLAG_TOCADDR_AS_BCD = BIT(3),
	/* TOC track numbers are in BCD. */
	IDE_AFLAG_TOCTRACKS_AS_BCD = BIT(4),
	/* Saved TOC information is current. */
	IDE_AFLAG_TOC_VALID = BIT(6),
	/* We think that the drive door is locked. */
	IDE_AFLAG_DOOR_LOCKED = BIT(7),
	/* SET_CD_SPEED command is unsupported. */
	IDE_AFLAG_NO_SPEED_SELECT = BIT(8),
	IDE_AFLAG_VERTOS_300_SSD = BIT(9),
	IDE_AFLAG_VERTOS_600_ESD = BIT(10),
	IDE_AFLAG_SANYO_3CD = BIT(11),
	IDE_AFLAG_FULL_CAPS_PAGE = BIT(12),
	IDE_AFLAG_PLAY_AUDIO_OK = BIT(13),
	IDE_AFLAG_LE_SPEED_FIELDS = BIT(14),

	/* ide-floppy */
	/* Avoid commands not supported in Clik drive */
	IDE_AFLAG_CLIK_DRIVE = BIT(15),
	/* Requires BH algorithm for packets */
	IDE_AFLAG_ZIP_DRIVE = BIT(16),
	/* Supports format progress report */
	IDE_AFLAG_SRFP = BIT(17),

	/* ide-tape */
	IDE_AFLAG_IGNORE_DSC = BIT(18),
	/* 0 When the tape position is unknown */
	IDE_AFLAG_ADDRESS_VALID = BIT(19),
	/* Device already opened */
	IDE_AFLAG_BUSY = BIT(20),
	/* Attempt to auto-detect the current user block size */
	IDE_AFLAG_DETECT_BS = BIT(21),
	/* Currently on a filemark */
	IDE_AFLAG_FILEMARK = BIT(22),
	/* 0 = no tape is loaded, so we don't rewind after ejecting */
	IDE_AFLAG_MEDIUM_PRESENT = BIT(23),

	IDE_AFLAG_NO_AUTOCLOSE = BIT(24),
};

/* device flags */
enum {
	/* restore settings after device reset */
	IDE_DFLAG_KEEP_SETTINGS = BIT(0),
	/* device is using DMA for read/write */
	IDE_DFLAG_USING_DMA = BIT(1),
	/* okay to unmask other IRQs */
	IDE_DFLAG_UNMASK = BIT(2),
	/* don't attempt flushes */
	IDE_DFLAG_NOFLUSH = BIT(3),
	/* DSC overlap */
	IDE_DFLAG_DSC_OVERLAP = BIT(4),
	/* give potential excess bandwidth */
	IDE_DFLAG_NICE1 = BIT(5),
	/* device is physically present */
	IDE_DFLAG_PRESENT = BIT(6),
	/* disable Host Protected Area */
	IDE_DFLAG_NOHPA = BIT(7),
	/* id read from device (synthetic if not set) */
	IDE_DFLAG_ID_READ = BIT(8),
	IDE_DFLAG_NOPROBE = BIT(9),
	/* need to do check_media_change() */
	IDE_DFLAG_REMOVABLE = BIT(10),
	IDE_DFLAG_FORCED_GEOM = BIT(12),
	/* disallow setting unmask bit */
	IDE_DFLAG_NO_UNMASK = BIT(13),
	/* disallow enabling 32-bit I/O */
	IDE_DFLAG_NO_IO_32BIT = BIT(14),
	/* for removable only: door lock/unlock works */
	IDE_DFLAG_DOORLOCKING = BIT(15),
	/* disallow DMA */
	IDE_DFLAG_NODMA = BIT(16),
	/* powermanagement told us not to do anything, so sleep nicely */
	IDE_DFLAG_BLOCKED = BIT(17),
	/* sleeping & sleep field valid */
	IDE_DFLAG_SLEEPING = BIT(18),
	IDE_DFLAG_POST_RESET = BIT(19),
	IDE_DFLAG_UDMA33_WARNED = BIT(20),
	IDE_DFLAG_LBA48 = BIT(21),
	/* status of write cache */
	IDE_DFLAG_WCACHE = BIT(22),
	/* used for ignoring ATA_DF */
	IDE_DFLAG_NOWERR = BIT(23),
	/* retrying in PIO */
	IDE_DFLAG_DMA_PIO_RETRY = BIT(24),
	IDE_DFLAG_LBA = BIT(25),
	/* don't unload heads */
	IDE_DFLAG_NO_UNLOAD = BIT(26),
	/* heads unloaded, please don't reset port */
	IDE_DFLAG_PARKED = BIT(27),
	IDE_DFLAG_MEDIA_CHANGED = BIT(28),
	/* write protect */
	IDE_DFLAG_WP = BIT(29),
	IDE_DFLAG_FORMAT_IN_PROGRESS = BIT(30),
	IDE_DFLAG_NIEN_QUIRK = BIT(31),
};

struct ide_drive_s {
	char name[4];		/* drive name, such as "hda" */
	char driver_req[10];	/* requests specific driver */

	struct request_queue *queue;	/* request queue */

	bool (*prep_rq)(struct ide_drive_s *, struct request *);

	struct blk_mq_tag_set tag_set;

	struct request *rq;	/* current request */
	void *driver_data;	/* extra driver data */
	u16 *id;		/* identification info */
#ifdef CONFIG_IDE_PROC_FS
	struct proc_dir_entry *proc;	/* /proc/ide/ directory entry */
	const struct ide_proc_devset *settings;	/* /proc/ide/ drive settings */
#endif
	struct hwif_s *hwif;	/* actually (ide_hwif_t *) */

	const struct ide_disk_ops *disk_ops;

	unsigned long dev_flags;

	unsigned long sleep;	/* sleep until this time */
	unsigned long timeout;	/* max time to wait for irq */

	u8 special_flags;	/* special action flags */

	u8 select;		/* basic drive/head select reg value */
	u8 retry_pio;		/* retrying dma capable host in pio */
	u8 waiting_for_dma;	/* dma currently in progress */
	u8 dma;			/* atapi dma flag */

	u8 init_speed;		/* transfer rate set at boot */
	u8 current_speed;	/* current transfer rate set */
	u8 desired_speed;	/* desired transfer rate set */
	u8 pio_mode;		/* for ->set_pio_mode _only_ */
	u8 dma_mode;		/* for ->set_dma_mode _only_ */
	u8 dn;			/* now wide spread use */
	u8 acoustic;		/* acoustic management */
	u8 media;		/* disk, cdrom, tape, floppy, ... */
	u8 ready_stat;		/* min status value for drive ready */
	u8 mult_count;		/* current multiple sector setting */
	u8 mult_req;		/* requested multiple sector setting */
	u8 io_32bit;		/* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
	u8 bad_wstat;		/* used for ignoring ATA_DF */
	u8 head;		/* "real" number of heads */
	u8 sect;		/* "real" sectors per track */
	u8 bios_head;		/* BIOS/fdisk/LILO number of heads */
	u8 bios_sect;		/* BIOS/fdisk/LILO sectors per track */

	/* delay this long before sending packet command */
	u8 pc_delay;

	unsigned int bios_cyl;	/* BIOS/fdisk/LILO number of cyls */
	unsigned int cyl;	/* "real" number of cyls */
	void *drive_data;	/* used by set_pio_mode/dev_select() */
	unsigned int failures;	/* current failure count */
	unsigned int max_failures;	/* maximum allowed failure count */
	u64 probed_capacity;	/* initial/native media capacity */
	u64 capacity64;		/* total number of sectors */

	int lun;		/* logical unit */
	int crc_count;		/* crc counter to reduce drive speed */

	unsigned long debug_mask;	/* debugging levels switch */

#ifdef CONFIG_BLK_DEV_IDEACPI
	struct ide_acpi_drive_link *acpidata;
#endif
	struct list_head list;
	struct device gendev;
	struct completion gendev_rel_comp;	/* to deal with device release() */

	/* current packet command */
	struct ide_atapi_pc *pc;

	/* last failed packet command */
	struct ide_atapi_pc *failed_pc;

	/* callback for packet commands */
	int (*pc_callback)(struct ide_drive_s *, int);

	ide_startstop_t (*irq_handler)(struct ide_drive_s *);

	unsigned long atapi_flags;

	struct ide_atapi_pc request_sense_pc;

	/* current sense rq and buffer */
	bool sense_rq_armed;
	bool sense_rq_active;
	struct request *sense_rq;
	struct request_sense sense_data;

	/* async sense insertion */
	struct work_struct rq_work;
	struct list_head rq_list;
};

typedef struct ide_drive_s ide_drive_t;

#define to_ide_device(dev)	container_of(dev, ide_drive_t, gendev)

#define to_ide_drv(obj, cont_type)	\
	container_of(obj, struct cont_type, dev)
#define ide_drv_g(disk, cont_type)	\
	container_of((disk)->private_data, struct cont_type, driver)

struct ide_port_info;

struct ide_tp_ops {
	void (*exec_command)(struct hwif_s *, u8);
	u8 (*read_status)(struct hwif_s *);
	u8 (*read_altstatus)(struct hwif_s *);
	void (*write_devctl)(struct hwif_s *, u8);

	void (*dev_select)(ide_drive_t *);
	void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
	void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);

	void (*input_data)(ide_drive_t *, struct ide_cmd *,
			   void *, unsigned int);
	void (*output_data)(ide_drive_t *, struct ide_cmd *,
			    void *, unsigned int);
};

extern const struct ide_tp_ops default_tp_ops;

/**
 * struct ide_port_ops - IDE port operations
 *
 * @init_dev:		host specific initialization of a device
 * @set_pio_mode:	routine to program host for PIO mode
 * @set_dma_mode:	routine to program host for DMA mode
 * @reset_poll:		chipset polling based on hba specifics
 * @pre_reset:		chipset specific changes to default for device-hba resets
 * @resetproc:		routine to reset controller after a disk reset
 * @maskproc:		special host masking for drive selection
 * @quirkproc:		check host's drive quirk list
 * @clear_irq:		clear IRQ
 *
 * @mdma_filter:	filter MDMA modes
 * @udma_filter:	filter UDMA modes
 *
 * @cable_detect:	detect cable type
 */
struct ide_port_ops {
	void (*init_dev)(ide_drive_t *);
	void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
	void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
	blk_status_t (*reset_poll)(ide_drive_t *);
	void (*pre_reset)(ide_drive_t *);
	void (*resetproc)(ide_drive_t *);
	void (*maskproc)(ide_drive_t *, int);
	void (*quirkproc)(ide_drive_t *);
	void (*clear_irq)(ide_drive_t *);
	int (*test_irq)(struct hwif_s *);

	u8 (*mdma_filter)(ide_drive_t *);
	u8 (*udma_filter)(ide_drive_t *);

	u8 (*cable_detect)(struct hwif_s *);
};
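
/*
 * Illustrative sketch (not part of the original header; the myhost_* names
 * are hypothetical): the minimal shape of a host driver's ide_port_ops.
 * Hooks the chipset does not need are simply left out of the initializer.
 */
static inline void myhost_set_pio_mode(struct hwif_s *hwif, ide_drive_t *drive)
{
	/* program the chipset timing for drive->pio_mode - XFER_PIO_0 here */
}

static inline u8 myhost_cable_detect(struct hwif_s *hwif)
{
	return ATA_CBL_PATA80;	/* or ATA_CBL_PATA40, from chipset/BIOS state */
}

static const struct ide_port_ops myhost_port_ops __maybe_unused = {
	.set_pio_mode	= myhost_set_pio_mode,
	.cable_detect	= myhost_cable_detect,
};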
struct ide_dma_ops {
	void (*dma_host_set)(struct ide_drive_s *, int);
	int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
	void (*dma_start)(struct ide_drive_s *);
	int (*dma_end)(struct ide_drive_s *);
	int (*dma_test_irq)(struct ide_drive_s *);
	void (*dma_lost_irq)(struct ide_drive_s *);
	/* below ones are optional */
	int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
	int (*dma_timer_expiry)(struct ide_drive_s *);
	void (*dma_clear)(struct ide_drive_s *);
	/*
	 * The following method is optional and only required to be
	 * implemented for the SFF-8038i compatible controllers.
	 */
	u8 (*dma_sff_read_status)(struct hwif_s *);
};

enum {
	IDE_PFLAG_PROBING = BIT(0),
};

struct ide_host;

typedef struct hwif_s {
	struct hwif_s *mate;		/* other hwif from same PCI chip */
	struct proc_dir_entry *proc;	/* /proc/ide/ directory entry */

	struct ide_host *host;

	char name[6];			/* name of interface, eg. "ide0" */

	struct ide_io_ports io_ports;

	unsigned long sata_scr[SATA_NR_PORTS];

	ide_drive_t *devices[MAX_DRIVES + 1];

	unsigned long port_flags;

	u8 major;	/* our major number */
	u8 index;	/* 0 for ide0; 1 for ide1; ... */
	u8 channel;	/* for dual-port chips: 0=primary, 1=secondary */

	u32 host_flags;

	u8 pio_mask;

	u8 ultra_mask;
	u8 mwdma_mask;
	u8 swdma_mask;

	u8 cbl;		/* cable type */

	hwif_chipset_t chipset;	/* sub-module for tuning.. */

	struct device *dev;

	void (*rw_disk)(ide_drive_t *, struct request *);

	const struct ide_tp_ops *tp_ops;
	const struct ide_port_ops *port_ops;
	const struct ide_dma_ops *dma_ops;

	/* dma physical region descriptor table (cpu view) */
	unsigned int *dmatable_cpu;
	/* dma physical region descriptor table (dma view) */
	dma_addr_t dmatable_dma;

	/* maximum number of PRD table entries */
	int prd_max_nents;
	/* PRD entry size in bytes */
	int prd_ent_size;

	/* Scatter-gather list used to build the above */
	struct scatterlist *sg_table;
	int sg_max_nents;	/* Maximum number of entries in it */

	struct ide_cmd cmd;	/* current command */

	int rqsize;		/* max sectors per request */
	int irq;		/* our irq number */

	unsigned long dma_base;	/* base addr for dma ports */

	unsigned long config_data;	/* for use by chipset-specific code */
	unsigned long select_data;	/* for use by chipset-specific code */

	unsigned long extra_base;	/* extra addr for dma ports */
	unsigned extra_ports;		/* number of extra dma ports */

	unsigned present : 1;	/* this interface exists */
	unsigned busy : 1;	/* serializes devices on a port */

	struct device gendev;
	struct device *portdev;

	struct completion gendev_rel_comp;	/* To deal with device release() */

	void *hwif_data;	/* extra hwif data */

#ifdef CONFIG_BLK_DEV_IDEACPI
	struct ide_acpi_hwif_link *acpidata;
#endif

	/* IRQ handler, if active */
	ide_startstop_t (*handler)(ide_drive_t *);

	/* BOOL: polling active & poll_timeout field valid */
	unsigned int polling : 1;

	/* current drive */
	ide_drive_t *cur_dev;

	/* current request */
	struct request *rq;

	/* failsafe timer */
	struct timer_list timer;
	/* timeout value during long polls */
	unsigned long poll_timeout;
	/* queried upon timeouts */
	int (*expiry)(ide_drive_t *);

	int req_gen;
	int req_gen_timer;

	spinlock_t lock;
} ____cacheline_internodealigned_in_smp ide_hwif_t;

#define MAX_HOST_PORTS 4

struct ide_host {
	ide_hwif_t *ports[MAX_HOST_PORTS + 1];
	unsigned int n_ports;
	struct device *dev[2];

	int (*init_chipset)(struct pci_dev *);

	void (*get_lock)(irq_handler_t, void *);
	void (*release_lock)(void);

	irq_handler_t irq_handler;

	unsigned long host_flags;

	int irq_flags;

	void *host_priv;
	ide_hwif_t *cur_port;	/* for hosts requiring serialization */

	/* used for hosts requiring serialization */
	volatile unsigned long host_busy;
};

#define IDE_HOST_BUSY 0

/*
 * internal ide interrupt handler type
 */
typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
typedef int (ide_expiry_t)(ide_drive_t *);

/* used by ide-cd, ide-floppy, etc. */
typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);

extern struct mutex ide_setting_mtx;

/*
 * configurable drive settings
 */
#define DS_SYNC	BIT(0)

struct ide_devset {
	int (*get)(ide_drive_t *);
	int (*set)(ide_drive_t *, int);
	unsigned int flags;
};

#define __DEVSET(_flags, _get, _set) { \
	.flags	= _flags, \
	.get	= _get,	\
	.set	= _set,	\
}

#define ide_devset_get(name, field) \
static int get_##name(ide_drive_t *drive) \
{ \
	return drive->field; \
}

#define ide_devset_set(name, field) \
static int set_##name(ide_drive_t *drive, int arg) \
{ \
	drive->field = arg; \
	return 0; \
}

#define ide_devset_get_flag(name, flag) \
static int get_##name(ide_drive_t *drive) \
{ \
	return !!(drive->dev_flags & flag); \
}

#define ide_devset_set_flag(name, flag) \
static int set_##name(ide_drive_t *drive, int arg) \
{ \
	if (arg) \
		drive->dev_flags |= flag; \
	else \
		drive->dev_flags &= ~flag; \
	return 0; \
}

#define __IDE_DEVSET(_name, _flags, _get, _set) \
const struct ide_devset ide_devset_##_name = \
	__DEVSET(_flags, _get, _set)

#define IDE_DEVSET(_name, _flags, _get, _set) \
	static __IDE_DEVSET(_name, _flags, _get, _set)

#define ide_devset_rw(_name, _func) \
	IDE_DEVSET(_name, 0, get_##_func, set_##_func)

#define ide_devset_w(_name, _func) \
	IDE_DEVSET(_name, 0, NULL, set_##_func)

#define ide_ext_devset_rw(_name, _func) \
	__IDE_DEVSET(_name, 0, get_##_func, set_##_func)

#define ide_ext_devset_rw_sync(_name, _func) \
	__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)

#define ide_decl_devset(_name) \
	extern const struct ide_devset ide_devset_##_name

ide_decl_devset(io_32bit);
ide_decl_devset(keepsettings);
ide_decl_devset(pio_mode);
ide_decl_devset(unmaskirq);
ide_decl_devset(using_dma);
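
/*
 * Illustrative sketch (not part of the original header): wiring an existing
 * ide_drive_t field into a device setting with the helpers above.  The
 * "example_pc_delay" name is hypothetical; a real driver would reference
 * the resulting ide_devset_example_pc_delay from its settings table.
 */
ide_devset_get(example_pc_delay, pc_delay);
ide_devset_set(example_pc_delay, pc_delay);
IDE_DEVSET(example_pc_delay, DS_SYNC, get_example_pc_delay, set_example_pc_delay);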
#ifdef CONFIG_IDE_PROC_FS
/*
 * /proc/ide interface
 */

#define ide_devset_rw_field(_name, _field) \
ide_devset_get(_name, _field); \
ide_devset_set(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)

#define ide_devset_ro_field(_name, _field) \
ide_devset_get(_name, _field); \
IDE_DEVSET(_name, 0, get_##_name, NULL)

#define ide_devset_rw_flag(_name, _field) \
ide_devset_get_flag(_name, _field); \
ide_devset_set_flag(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)

struct ide_proc_devset {
	const char *name;
	const struct ide_devset *setting;
	int min, max;
	int (*mulf)(ide_drive_t *);
	int (*divf)(ide_drive_t *);
};

#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
	.name = __stringify(_name), \
	.setting = &ide_devset_##_name, \
	.min = _min, \
	.max = _max, \
	.mulf = _mulf, \
	.divf = _divf, \
}

#define IDE_PROC_DEVSET(_name, _min, _max) \
	__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
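
/*
 * Illustrative sketch (not part of the original header; the table name is
 * hypothetical): how a driver exposes settings through /proc/ide using the
 * macro above.  The 0..3 range matches the io_32bit setting declared
 * earlier in this file.
 */
static const struct ide_proc_devset ide_example_settings[] __maybe_unused = {
	IDE_PROC_DEVSET(io_32bit, 0, 3),
	{ NULL },	/* sentinel */
};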
typedef struct {
	const char *name;
	umode_t mode;
	int (*show)(struct seq_file *, void *);
} ide_proc_entry_t;

void proc_ide_create(void);
void proc_ide_destroy(void);
void ide_proc_register_port(ide_hwif_t *);
void ide_proc_port_register_devices(ide_hwif_t *);
void ide_proc_unregister_device(ide_drive_t *);
void ide_proc_unregister_port(ide_hwif_t *);
void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
int ide_capacity_proc_show(struct seq_file *m, void *v);
int ide_geometry_proc_show(struct seq_file *m, void *v);
#else
static inline void proc_ide_create(void) { ; }
static inline void proc_ide_destroy(void) { ; }
static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_register_driver(ide_drive_t *drive,
					    struct ide_driver *driver) { ; }
static inline void ide_proc_unregister_driver(ide_drive_t *drive,
					      struct ide_driver *driver) { ; }
#endif

enum {
	/* enter/exit functions */
	IDE_DBG_FUNC = BIT(0),
	/* sense key/asc handling */
	IDE_DBG_SENSE = BIT(1),
	/* packet commands handling */
	IDE_DBG_PC = BIT(2),
	/* request handling */
	IDE_DBG_RQ = BIT(3),
	/* driver probing/setup */
	IDE_DBG_PROBE = BIT(4),
};

/* DRV_NAME has to be defined in the driver before using the macro below */
#define __ide_debug_log(lvl, fmt, args...) \
{ \
	if (unlikely(drive->debug_mask & lvl)) \
		printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
			  __func__, ## args); \
}
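
/*
 * Illustrative usage sketch (not part of the original header): a driver
 * defines DRV_NAME before using the helper and typically wraps it, e.g.
 *
 *	#define DRV_NAME "ide-example"
 *	#define ide_debug_log(lvl, fmt, args...) \
 *		__ide_debug_log(lvl, fmt, ## args)
 *
 *	ide_debug_log(IDE_DBG_FUNC, "enter");
 *
 * which prints only when the corresponding bit is set in drive->debug_mask.
 */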
/*
 * Power Management state machine (rq->pm->pm_step).
 *
 * For each step, the core calls ide_start_power_step() first.
 * This can return:
 *  - ide_stopped :  In this case, the core calls us back again unless
 *                   step has been set to IDE_PM_COMPLETED.
 *  - ide_started :  In this case, the channel is left busy until an
 *                   async event (interrupt) occurs.
 * Typically, ide_start_power_step() will issue a taskfile request with
 * do_rw_taskfile().
 *
 * Upon reception of the interrupt, the core will call ide_complete_power_step()
 * with the error code if any. This routine should update the step value
 * and return. It should not start a new request. The core will call
 * ide_start_power_step() for the new step value, unless step has been
 * set to IDE_PM_COMPLETED.
 */
enum {
	IDE_PM_START_SUSPEND,
	IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
	IDE_PM_STANDBY,

	IDE_PM_START_RESUME,
	IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
	IDE_PM_IDLE,
	IDE_PM_RESTORE_DMA,

	IDE_PM_COMPLETED,
};

int generic_ide_suspend(struct device *, pm_message_t);
int generic_ide_resume(struct device *);

void ide_complete_power_step(ide_drive_t *, struct request *);
ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
void ide_complete_pm_rq(ide_drive_t *, struct request *);
void ide_check_pm_state(ide_drive_t *, struct request *);

/*
 * Subdrivers support.
 *
 * The gen_driver.owner field should be set to the module owner of this driver.
 * The gen_driver.name field should be set to the name of this driver.
 */
struct ide_driver {
	const char *version;
	ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
	struct device_driver gen_driver;
	int (*probe)(ide_drive_t *);
	void (*remove)(ide_drive_t *);
	void (*resume)(ide_drive_t *);
	void (*shutdown)(ide_drive_t *);
#ifdef CONFIG_IDE_PROC_FS
	ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
	const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
#endif
};

#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)

int ide_device_get(ide_drive_t *);
void ide_device_put(ide_drive_t *);

struct ide_ioctl_devset {
	unsigned int get_ioctl;
	unsigned int set_ioctl;
	const struct ide_devset *setting;
};

int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
		      unsigned long, const struct ide_ioctl_devset *);

int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);

extern int ide_vlb_clk;
extern int ide_pci_clk;

int ide_end_rq(ide_drive_t *, struct request *, blk_status_t, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);
void ide_insert_request_head(ide_drive_t *, struct request *);

void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);

void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
			 unsigned int);

void ide_pad_transfer(ide_drive_t *, int, int);

ide_startstop_t ide_error(ide_drive_t *, const char *, u8);

void ide_fix_driveid(u16 *);

extern void ide_fixstring(u8 *, const int, const int);

int ide_busy_sleep(ide_drive_t *, unsigned long, int);

int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);

ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);

extern ide_startstop_t ide_do_reset (ide_drive_t *);

extern int ide_devset_execute(ide_drive_t *drive,
			      const struct ide_devset *setting, int arg);

void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
int ide_complete_rq(ide_drive_t *, blk_status_t, unsigned int);

void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
void ide_tf_dump(const char *, struct ide_cmd *);

void ide_exec_command(ide_hwif_t *, u8);
u8 ide_read_status(ide_hwif_t *);
u8 ide_read_altstatus(ide_hwif_t *);
void ide_write_devctl(ide_hwif_t *, u8);

void ide_dev_select(ide_drive_t *);
void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);

void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);

void SELECT_MASK(ide_drive_t *, int);

u8 ide_read_error(ide_drive_t *);
void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);

int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);

int ide_check_atapi_device(ide_drive_t *, const char *);

void ide_init_pc(struct ide_atapi_pc *);

/* Disk head parking */
extern wait_queue_head_t ide_park_wq;
ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
		      char *buf);
ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
		       const char *buf, size_t len);

/*
 * Special requests for ide-tape block device strategy routine.
 *
 * In order to service a character device command, we add special requests to
 * the tail of our block device request queue and wait for their completion.
 */
enum {
	REQ_IDETAPE_PC1 = BIT(0),	/* packet command (first stage) */
	REQ_IDETAPE_PC2 = BIT(1),	/* packet command (second stage) */
	REQ_IDETAPE_READ = BIT(2),
	REQ_IDETAPE_WRITE = BIT(3),
};

int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
		      void *, unsigned int);

int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
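
/*
 * Illustrative sketch (not part of the original header; the helper name,
 * opcode and buffer handling are assumptions): building a packet command
 * with ide_init_pc() and issuing it synchronously with ide_queue_pc_tail(),
 * the way the ATAPI sub-drivers use these helpers.
 */
static inline int ide_example_test_unit_ready(ide_drive_t *drive,
					      struct gendisk *disk)
{
	struct ide_atapi_pc pc;

	ide_init_pc(&pc);	/* zero packet bytes, retries, flags */
	pc.c[0] = 0x00;		/* SCSI TEST UNIT READY opcode */

	/* no data buffer, zero transfer length */
	return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
}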
void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
void ide_retry_pc(ide_drive_t *drive);

void ide_prep_sense(ide_drive_t *drive, struct request *rq);
int ide_queue_sense_rq(ide_drive_t *drive, void *special);

int ide_cd_expiry(ide_drive_t *);

int ide_cd_get_xferlen(struct request *);

ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);

ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);

void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);

void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);

int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);

int ide_taskfile_ioctl(ide_drive_t *, unsigned long);

int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);

extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
extern u8 eighty_ninty_three (ide_drive_t *);
extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);

extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);

extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);

extern void ide_timer_expiry(struct timer_list *t);
extern irqreturn_t ide_intr(int irq, void *dev_id);
extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);

void ide_init_disk(struct gendisk *, ide_drive_t *);

#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
#else
#define ide_pci_register_driver(d) pci_register_driver(d)
#endif

static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
		return 1;
	return 0;
}

void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
			 struct ide_hw *, struct ide_hw **);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);

#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
int ide_pci_set_master(struct pci_dev *, const char *);
unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
#else
static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
				     const struct ide_port_info *d)
{
	return -EINVAL;
}
#endif

struct ide_pci_enablebit {
	u8 reg;		/* byte pci reg holding the enable-bit */
	u8 mask;	/* mask to isolate the enable-bit */
	u8 val;		/* value of masked reg when "enabled" */
};

enum {
	/* Uses ISA control ports not PCI ones. */
	IDE_HFLAG_ISA_PORTS = BIT(0),
	/* single port device */
	IDE_HFLAG_SINGLE = BIT(1),
	/* don't use legacy PIO blacklist */
	IDE_HFLAG_PIO_NO_BLACKLIST = BIT(2),
	/* set for the second port of QD65xx */
	IDE_HFLAG_QD_2ND_PORT = BIT(3),
	/* use PIO8/9 for prefetch off/on */
	IDE_HFLAG_ABUSE_PREFETCH = BIT(4),
	/* use PIO6/7 for fast-devsel off/on */
	IDE_HFLAG_ABUSE_FAST_DEVSEL = BIT(5),
	/* use 100-102 and 200-202 PIO values to set DMA modes */
	IDE_HFLAG_ABUSE_DMA_MODES = BIT(6),
	/*
	 * keep DMA setting when programming PIO mode, may be used only
	 * for hosts which have separate PIO and DMA timings (ie. PMAC)
	 */
	IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = BIT(7),
	/* program host for the transfer mode after programming device */
	IDE_HFLAG_POST_SET_MODE = BIT(8),
	/* don't program host/device for the transfer mode ("smart" hosts) */
	IDE_HFLAG_NO_SET_MODE = BIT(9),
	/* trust BIOS for programming chipset/device for DMA */
	IDE_HFLAG_TRUST_BIOS_FOR_DMA = BIT(10),
	/* host is CS5510/CS5520 */
	IDE_HFLAG_CS5520 = BIT(11),
	/* ATAPI DMA is unsupported */
	IDE_HFLAG_NO_ATAPI_DMA = BIT(12),
	/* set if host is a "non-bootable" controller */
	IDE_HFLAG_NON_BOOTABLE = BIT(13),
	/* host doesn't support DMA */
	IDE_HFLAG_NO_DMA = BIT(14),
	/* check if host is PCI IDE device before allowing DMA */
	IDE_HFLAG_NO_AUTODMA = BIT(15),
	/* host uses MMIO */
	IDE_HFLAG_MMIO = BIT(16),
	/* no LBA48 */
	IDE_HFLAG_NO_LBA48 = BIT(17),
	/* no LBA48 DMA */
	IDE_HFLAG_NO_LBA48_DMA = BIT(18),
	/* data FIFO is cleared by an error */
	IDE_HFLAG_ERROR_STOPS_FIFO = BIT(19),
	/* serialize ports */
	IDE_HFLAG_SERIALIZE = BIT(20),
	/* host is DTC2278 */
	IDE_HFLAG_DTC2278 = BIT(21),
	/* 4 devices on a single set of I/O ports */
	IDE_HFLAG_4DRIVES = BIT(22),
	/* host is TRM290 */
	IDE_HFLAG_TRM290 = BIT(23),
	/* use 32-bit I/O ops */
	IDE_HFLAG_IO_32BIT = BIT(24),
	/* unmask IRQs */
	IDE_HFLAG_UNMASK_IRQS = BIT(25),
	IDE_HFLAG_BROKEN_ALTSTATUS = BIT(26),
	/* serialize ports if DMA is possible (for sl82c105) */
	IDE_HFLAG_SERIALIZE_DMA = BIT(27),
	/* force host out of "simplex" mode */
	IDE_HFLAG_CLEAR_SIMPLEX = BIT(28),
	/* DSC overlap is unsupported */
	IDE_HFLAG_NO_DSC = BIT(29),
	/* never use 32-bit I/O ops */
	IDE_HFLAG_NO_IO_32BIT = BIT(30),
	/* never unmask IRQs */
	IDE_HFLAG_NO_UNMASK_IRQS = BIT(31),
};

#ifdef CONFIG_BLK_DEV_OFFBOARD
# define IDE_HFLAG_OFF_BOARD	0
#else
# define IDE_HFLAG_OFF_BOARD	IDE_HFLAG_NON_BOOTABLE
#endif

struct ide_port_info {
	char *name;

	int (*init_chipset)(struct pci_dev *);

	void (*get_lock)(irq_handler_t, void *);
	void (*release_lock)(void);

	void (*init_iops)(ide_hwif_t *);
	void (*init_hwif)(ide_hwif_t *);
	int (*init_dma)(ide_hwif_t *,
			const struct ide_port_info *);

	const struct ide_tp_ops *tp_ops;
	const struct ide_port_ops *port_ops;
	const struct ide_dma_ops *dma_ops;

	struct ide_pci_enablebit enablebits[2];

	hwif_chipset_t chipset;

	u16 max_sectors;	/* if < than the default one */

	u32 host_flags;

	int irq_flags;

	u8 pio_mask;
	u8 swdma_mask;
	u8 mwdma_mask;
	u8 udma_mask;
};
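
/*
 * Illustrative sketch (not part of the original header; the name string and
 * flag choice are assumptions): a minimal ide_port_info as a simple PIO-only
 * legacy host might fill it in before registering its ports.
 */
static const struct ide_port_info ide_example_port_info __maybe_unused = {
	.name		= "ide-example",
	.host_flags	= IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA,
	.pio_mask	= ATA_PIO4,
	.chipset	= ide_generic,
};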
/*
 * State information carried for ATA_PRIV_PM_SUSPEND and ATA_PRIV_PM_RESUME
 * requests.
 */
struct ide_pm_state {
	/* PM state machine step value, currently driver specific */
	int pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32 pm_state;
	void *data;	/* for driver use */
};

int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
		     const struct ide_port_info *, void *);
void ide_pci_remove(struct pci_dev *);

#ifdef CONFIG_PM
int ide_pci_suspend(struct pci_dev *, pm_message_t);
int ide_pci_resume(struct pci_dev *);
#else
#define ide_pci_suspend NULL
#define ide_pci_resume NULL
#endif

void ide_map_sg(ide_drive_t *, struct ide_cmd *);
void ide_init_sg_cmd(struct ide_cmd *, unsigned int);

#define BAD_DMA_DRIVE		0
#define GOOD_DMA_DRIVE		1

struct drive_list_entry {
	const char *id_model;
	const char *id_firmware;
};

int ide_in_drive_list(u16 *, const struct drive_list_entry *);

#ifdef CONFIG_BLK_DEV_IDEDMA
int ide_dma_good_drive(ide_drive_t *);
int __ide_dma_bad_drive(ide_drive_t *);

u8 ide_find_dma_mode(ide_drive_t *, u8);

static inline u8 ide_max_dma_mode(ide_drive_t *drive)
{
	return ide_find_dma_mode(drive, XFER_UDMA_6);
}

void ide_dma_off_quietly(ide_drive_t *);
void ide_dma_off(ide_drive_t *);
void ide_dma_on(ide_drive_t *);
int ide_set_dma(ide_drive_t *);
void ide_check_dma_crc(ide_drive_t *);
ide_startstop_t ide_dma_intr(ide_drive_t *);

int ide_allocate_dma_engine(ide_hwif_t *);
void ide_release_dma_engine(ide_hwif_t *);

int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
int config_drive_for_dma(ide_drive_t *);
int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
void ide_dma_host_set(ide_drive_t *, int);
int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
extern void ide_dma_start(ide_drive_t *);
int ide_dma_end(ide_drive_t *);
int ide_dma_test_irq(ide_drive_t *);
int ide_dma_sff_timer_expiry(ide_drive_t *);
u8 ide_dma_sff_read_status(ide_hwif_t *);
extern const struct ide_dma_ops sff_dma_ops;
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

void ide_dma_lost_irq(ide_drive_t *);
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
#else
static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
static inline void ide_dma_off(ide_drive_t *drive) { ; }
static inline void ide_dma_on(ide_drive_t *drive) { ; }
static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
static inline int ide_dma_prepare(ide_drive_t *drive,
				  struct ide_cmd *cmd) { return 1; }
static inline void ide_dma_unmap_sg(ide_drive_t *drive,
				    struct ide_cmd *cmd) { ; }
#endif /* CONFIG_BLK_DEV_IDEDMA */

#ifdef CONFIG_BLK_DEV_IDEACPI
int ide_acpi_init(void);
bool ide_port_acpi(ide_hwif_t *hwif);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
void ide_acpi_init_port(ide_hwif_t *);
void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
static inline int ide_acpi_init(void) { return 0; }
static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
#endif

void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);

void ide_check_nien_quirk_list(ide_drive_t *);
void ide_undecoded_slave(ide_drive_t *);

void ide_port_apply_params(ide_hwif_t *);
int ide_sysfs_register_port(ide_hwif_t *);

struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
				unsigned int);
void ide_host_free(struct ide_host *);
int ide_host_register(struct ide_host *, const struct ide_port_info *,
		      struct ide_hw **);
int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
		 struct ide_host **);
void ide_host_remove(struct ide_host *);
int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
void ide_port_unregister_devices(ide_hwif_t *);
void ide_port_scan(ide_hwif_t *);

static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
{
	return hwif->hwif_data;
}

static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
{
	hwif->hwif_data = data;
}

u64 ide_get_lba_addr(struct ide_cmd *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);

struct ide_timing {
	u8 mode;
	u8 setup;	/* t1 */
	u16 act8b;	/* t2 for 8-bit io */
	u16 rec8b;	/* t2i for 8-bit io */
	u16 cyc8b;	/* t0 for 8-bit io */
	u16 active;	/* t2 or tD */
	u16 recover;	/* t2i or tK */
	u16 cycle;	/* t0 */
	u16 udma;	/* t2CYCTYP/2 */
};

enum {
	IDE_TIMING_SETUP = BIT(0),
	IDE_TIMING_ACT8B = BIT(1),
	IDE_TIMING_REC8B = BIT(2),
	IDE_TIMING_CYC8B = BIT(3),
	IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
			  IDE_TIMING_CYC8B,
	IDE_TIMING_ACTIVE = BIT(4),
	IDE_TIMING_RECOVER = BIT(5),
	IDE_TIMING_CYCLE = BIT(6),
	IDE_TIMING_UDMA = BIT(7),
	IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
			 IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
			 IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
};

struct ide_timing *ide_timing_find_mode(u8);
u16 ide_pio_cycle_time(ide_drive_t *, u8);
void ide_timing_merge(struct ide_timing *, struct ide_timing *,
		      struct ide_timing *, unsigned int);
int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);

#ifdef CONFIG_IDE_XFER_MODE
int ide_scan_pio_blacklist(char *);
const char *ide_xfer_verbose(u8);
int ide_pio_need_iordy(ide_drive_t *, const u8);
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
void ide_set_pio(ide_drive_t *, u8);
int ide_set_xfer_rate(ide_drive_t *, u8);
#else
static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
#endif

static inline void ide_set_max_pio(ide_drive_t *drive)
{
	ide_set_pio(drive, 255);
}

char *ide_media_string(ide_drive_t *);

extern const struct attribute_group *ide_dev_groups[];
extern struct bus_type ide_bus_type;
extern struct class *ide_port_class;

static inline void ide_dump_identify(u8 *id)
{
	print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
}

static inline int hwif_to_node(ide_hwif_t *hwif)
{
	return hwif->dev ? dev_to_node(hwif->dev) : -1;
}

static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
{
	ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];

	return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
}

static inline void *ide_get_drivedata(ide_drive_t *drive)
{
	return drive->drive_data;
}

static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
{
	drive->drive_data = data;
}

#define ide_port_for_each_dev(i, dev, port) \
	for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)

#define ide_port_for_each_present_dev(i, dev, port) \
	for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
		if ((dev)->dev_flags & IDE_DFLAG_PRESENT)

#define ide_host_for_each_port(i, port, host) \
	for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
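
/*
 * Illustrative sketch (not part of the original header; the helper name is
 * hypothetical): walking a port's devices with the iterator above, counting
 * only those that probed successfully.
 */
static inline int ide_example_count_present(ide_hwif_t *port)
{
	ide_drive_t *drive;
	int i, n = 0;

	ide_port_for_each_present_dev(i, drive, port)
		n++;	/* only devices with IDE_DFLAG_PRESENT reach here */

	return n;
}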
#endif /* _IDE_H */