tmp_suning_uos_patched/drivers/ata/pata_efar.c
Tejun Heo a1efdaba2d libata: make reset related methods proper port operations
Currently, reset methods are not specified directly in the
ata_port_operations table.  If an LLD wants to use custom reset
methods, it has to construct and use an error_handler which uses those
reset methods.  It's done this way for two reasons.

First, the ops table already contained too many methods and adding
four more of them would noticeably increase the amount of necessary
boilerplate code all over low level drivers.

Second, as ->error_handler uses those reset methods, it can get
confusing: by overriding ->error_handler, those reset ops can be
rendered useless, which makes the layering a bit hazy.

Now that the ops table uses inheritance, the first problem no longer
exists.  The second isn't completely solved, but it is relieved by
providing default values - most drivers can simply override what they
implement and don't have to concern themselves with higher level
callbacks.  In fact, there currently is no driver which actually
modifies error handling behavior.  Drivers which override
->error_handler just wrap the standard error handler to prepare the
controller for EH.  I don't think making ops layering strict has any
noticeable benefit.

This patch makes ->prereset, ->softreset, ->hardreset, ->postreset and
their PMP counterparts proper ops.  Default ops are provided in the
base ops tables and drivers are converted to override individual reset
methods instead of creating a custom error_handler.
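
For example, after this conversion a driver that only needs a custom
prereset ends up with an ops table like the sketch below (trimmed to
the reset-relevant fields; it mirrors the efar_ops table in the file
that follows), instead of wrapping something like ata_bmdma_drive_eh()
in its own ->error_handler just to plug that prereset in:

	static struct ata_port_operations efar_ops = {
		.inherits	= &ata_bmdma_port_ops,	/* default EH and reset ops */
		.prereset	= efar_pre_reset,	/* only this step is overridden */
	};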

* ata_std_error_handler() doesn't use sata_std_hardreset() if SCRs
  aren't accessible.  sata_promise doesn't need to use separate
  error_handlers for PATA and SATA anymore.

* softreset is broken for sata_inic162x and sata_sx4.  As libata now
  always prefers hardreset, this doesn't really matter, but the ops are
  forced to NULL using ATA_OP_NULL for documentation purposes.

* pata_hpt374 needs to use different prereset methods for the first and
  second PCI functions.  This used to be done by branching from
  hpt374_error_handler().  The proper way to do this is to use
  separate ops and port_info tables for each function.  Converted (a
  sketch follows below).
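
A rough illustration of that per-function split (the symbol names here
are illustrative, not necessarily the exact ones in pata_hpt37x.c):
each PCI function gets its own ops table, differing only in .prereset,
and the probe routine picks the matching port_info based on
PCI_FUNC(pdev->devfn):

	static struct ata_port_operations hpt374_fn0_port_ops = {
		.inherits	= &hpt372_port_ops,	/* shared timing/cable ops */
		.prereset	= hpt37x_pre_reset,
	};

	static struct ata_port_operations hpt374_fn1_port_ops = {
		.inherits	= &hpt374_fn0_port_ops,
		.prereset	= hpt374_fn1_pre_reset,	/* function 1 needs an extra check */
	};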

Signed-off-by: Tejun Heo <htejun@gmail.com>
2008-04-17 15:44:18 -04:00

/*
 *	pata_efar.c - EFAR PIIX clone controller driver
 *
 *	(C) 2005 Red Hat <alan@redhat.com>
 *
 *	Some parts based on ata_piix.c by Jeff Garzik and others.
 *
 *	The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
 *	Intel ICH controllers the EFAR widened the UDMA mode register bits
 *	and doesn't require the funky clock selection.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>

#define DRV_NAME	"pata_efar"
#define DRV_VERSION	"0.4.4"

/**
 *	efar_pre_reset	-	Enable bits
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Check the EFAR port-enable bits (PCI config 0x41/0x43) and, if the
 *	port is enabled, fall through to the standard prereset.
 */

static int efar_pre_reset(struct ata_link *link, unsigned long deadline)
{
	static const struct pci_bits efar_enable_bits[] = {
		{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0 */
		{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
	};
	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_std_prereset(link, deadline);
}

/**
 *	efar_cable_detect	-	check for 40/80 pin
 *	@ap: Port
 *
 *	Perform cable detection for the EFAR ATA interface. This is
 *	different to the PIIX arrangement
 */

static int efar_cable_detect(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u8 tmp;

	pci_read_config_byte(pdev, 0x47, &tmp);
	if (tmp & (2 >> ap->port_no))
		return ATA_CBL_PATA40;
	return ATA_CBL_PATA80;
}

/**
 *	efar_set_piomode - Initialize host controller PATA PIO timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device to program
 *
 *	Set PIO mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned int idetm_port = ap->port_no ? 0x42 : 0x40;
	u16 idetm_data;
	int control = 0;

	/*
	 * See Intel Document 298600-004 for the timing programming rules
	 * for PIIX/ICH. The EFAR is a clone so very similar.
	 */
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	if (pio > 2)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))	/* PIO 3/4 require IORDY */
		control |= 2;	/* IE enable */
	/* Intel specifies that the PPE functionality is for disk only */
	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE enable */

	pci_read_config_word(dev, idetm_port, &idetm_data);

	/* Enable PPE, IE and TIME as appropriate */
	if (adev->devno == 0) {
		idetm_data &= 0xCCF0;
		idetm_data |= control;
		idetm_data |= (timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	} else {
		int shift = 4 * ap->port_no;
		u8 slave_data;

		idetm_data &= 0xCC0F;
		idetm_data |= (control << 4);

		/* Slave timing in separate register */
		pci_read_config_byte(dev, 0x44, &slave_data);
		/* preserve the other port's slave-timing nibble */
		slave_data &= ap->port_no ? 0x0F : 0xF0;
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
		pci_write_config_byte(dev, 0x44, slave_data);
	}

	idetm_data |= 0x4000;	/* Ensure SITRE is enabled */
	pci_write_config_word(dev, idetm_port, idetm_data);
}

/**
 *	efar_set_dmamode - Initialize host controller PATA DMA timings
 *	@ap: Port whose timings we are configuring
 *	@adev: Device to program
 *
 *	Set UDMA/MWDMA mode for device, in host controller PCI config space.
 *
 *	LOCKING:
 *	None (inherited from caller).
 */

static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	u8 master_port = ap->port_no ? 0x42 : 0x40;
	u16 master_data;
	u8 speed = adev->dma_mode;
	int devid = adev->devno + 2 * ap->port_no;
	u8 udma_enable;

	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	pci_read_config_word(dev, master_port, &master_data);
	pci_read_config_byte(dev, 0x48, &udma_enable);

	if (speed >= XFER_UDMA_0) {
		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
		u16 udma_timing;

		udma_enable |= (1 << devid);

		/* Load the UDMA mode number */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(7 << (4 * devid));
		udma_timing |= udma << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);
	} else {
		/*
		 * MWDMA is driven by the PIO timings. We must also enable
		 * IORDY unconditionally along with TIME1. PPE has already
		 * been set when the PIO timing was set.
		 */
		unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
		unsigned int control;
		u8 slave_data;
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;

		control = 3;	/* IORDY|TIME1 */

		/* If the drive MWDMA is faster than it can do PIO then
		   we must force PIO into PIO0 */
		if (adev->pio_mode < needed_pio[mwdma])
			/* Enable DMA timing only */
			control |= 8;	/* PIO cycles in PIO0 */

		if (adev->devno) {	/* Slave */
			master_data &= 0xFF4F;	/* Mask out IORDY|TIME1|DMAONLY */
			master_data |= control << 4;
			pci_read_config_byte(dev, 0x44, &slave_data);
			/* preserve the other port's slave-timing nibble */
			slave_data &= ap->port_no ? 0x0F : 0xF0;
			/* Load the matching timing */
			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
			pci_write_config_byte(dev, 0x44, slave_data);
		} else {	/* Master */
			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
						   and master timing bits */
			master_data |= control;
			master_data |=
				(timings[pio][0] << 12) |
				(timings[pio][1] << 8);
		}

		udma_enable &= ~(1 << devid);
		pci_write_config_word(dev, master_port, master_data);
	}
	pci_write_config_byte(dev, 0x48, udma_enable);
}

static struct scsi_host_template efar_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations efar_ops = {
	.inherits	= &ata_bmdma_port_ops,
	.cable_detect	= efar_cable_detect,
	.set_piomode	= efar_set_piomode,
	.set_dmamode	= efar_set_dmamode,
	.prereset	= efar_pre_reset,
};

/**
 *	efar_init_one - Register EFAR ATA PCI device with kernel services
 *	@pdev: PCI device to register
 *	@ent: Entry in efar_pci_tbl matching with @pdev
 *
 *	Called from kernel PCI layer.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, or -ERRNO value.
 */

static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	static const struct ata_port_info info = {
		.flags		= ATA_FLAG_SLAVE_POSS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.mwdma_mask	= 0x07,	/* mwdma0-2 */
		.udma_mask	= 0x0f,	/* UDMA 66 */
		.port_ops	= &efar_ops,
	};
	const struct ata_port_info *ppi[] = { &info, NULL };

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "version " DRV_VERSION "\n");

	return ata_pci_init_one(pdev, ppi, &efar_sht, NULL);
}

static const struct pci_device_id efar_pci_tbl[] = {
	{ PCI_VDEVICE(EFAR, 0x9130), },

	{ }	/* terminate list */
};

static struct pci_driver efar_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= efar_pci_tbl,
	.probe		= efar_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};

static int __init efar_init(void)
{
	return pci_register_driver(&efar_pci_driver);
}

static void __exit efar_exit(void)
{
	pci_unregister_driver(&efar_pci_driver);
}

module_init(efar_init);
module_exit(efar_exit);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
MODULE_VERSION(DRV_VERSION);