Merge branch 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata

Pull libata updates from Tejun Heo:

 - Write same support added

 - Minor ahci MSIX irq handling updates

 - Non-critical SCSI command translation fixes

 - Controller specific changes

* 'for-4.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
  ahci: qoriq: Revert "ahci: qoriq: Disable NCQ on ls2080a SoC"
  libata: remove <asm-generic/libata-portmap.h>
  libata: remove unused definitions from <asm/libata-portmap.h>
  pata_at91: Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR
  ata: Replace BUG() with BUG_ON().
  ata: sata_mv: Replacing dma_pool_alloc and memset with a single call dma_pool_zalloc.
  libata: Some drives failing on SCT Write Same
  ahci: use pci_alloc_irq_vectors
  libata: SCT Write Same handle ATA_DFLAG_PIO
  libata: SCT Write Same / DSM Trim
  libata: Add support for SCT Write Same
  libata: Safely overwrite attached page in WRITE SAME xlat
  ahci: also use a per-port lock for the multi-MSIX case
  ARM: dts: STiH407-family: Add ports-implemented property in sata nodes
  ahci: st: Add ports-implemented property in support
  ahci: qoriq: enable snoopable sata read and write
  ahci: qoriq: adjust sata parameter
  libata-scsi: fix MODE SELECT translation for Control mode page
  libata-scsi: use u8 array to store mode page copy
commit f96ed26122
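The write-same work pulled in below makes ata_scsi_write_same_xlat() choose its ATA back end from the UNMAP bit of the WRITE SAME (16) CDB: DSM TRIM when the bit is set, SCT Write Same when it is clear, with TRIM ranges packed as 64-bit entries (bits 47:0 = LBA, bits 63:48 = sector count). A minimal user-space sketch of that dispatch and encoding follows; it only mirrors the logic visible in the diff and is not the kernel code itself.

```c
#include <stdint.h>
#include <stdio.h>

/* One DSM TRIM range entry: bits 47:0 hold the LBA, bits 63:48 the count
 * (at most 0xffff sectors per entry), matching the format described in
 * the libata-scsi.c hunk below. */
static uint64_t trim_range_entry(uint64_t lba, uint32_t count)
{
	return lba | ((uint64_t)(count > 0xffff ? 0xffff : count) << 48);
}

/* Sketch of the dispatch made by ata_scsi_write_same_xlat(): byte 1 of the
 * WRITE SAME (16) CDB carries the UNMAP bit (0x08). */
static const char *write_same_backend(uint8_t cdb1, int has_trim, int has_sct_ws)
{
	int unmap = cdb1 & 0x8;

	if (unmap)
		return has_trim ? "DSM TRIM" : "invalid field";
	return has_sct_ws ? "SCT Write Same" : "invalid field";
}

int main(void)
{
	/* 8 sectors starting at LBA 0x1000 -> one little-endian range entry */
	printf("entry=0x%016llx\n",
	       (unsigned long long)trim_range_entry(0x1000, 8));
	printf("unmap set:   %s\n", write_same_backend(0x08, 1, 1));
	printf("unmap clear: %s\n", write_same_backend(0x00, 1, 1));
	return 0;
}
```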
@@ -502,10 +502,11 @@ usb2: usb3@3100000 {
};

sata: sata@3200000 {
compatible = "fsl,ls1043a-ahci", "fsl,ls1021a-ahci";
compatible = "fsl,ls1043a-ahci";
reg = <0x0 0x3200000 0x0 0x10000>;
interrupts = <0 69 0x4>;
clocks = <&clockgen 4 0>;
dma-coherent;
};

msi1: msi-controller1@1571000 {
@@ -683,6 +683,7 @@ sata0: sata@3200000 {
reg = <0x0 0x3200000 0x0 0x10000>;
interrupts = <0 133 0x4>; /* Level high type */
clocks = <&clockgen 4 3>;
dma-coherent;
};

sata1: sata@3210000 {

@@ -691,6 +692,7 @@ sata1: sata@3210000 {
reg = <0x0 0x3210000 0x0 0x10000>;
interrupts = <0 136 0x4>; /* Level high type */
clocks = <&clockgen 4 3>;
dma-coherent;
};

usb0: usb3@3100000 {
@@ -1,12 +1,8 @@
#ifndef __ASM_IA64_LIBATA_PORTMAP_H
#define __ASM_IA64_LIBATA_PORTMAP_H

#define ATA_PRIMARY_CMD 0x1F0
#define ATA_PRIMARY_CTL 0x3F6
#define ATA_PRIMARY_IRQ(dev) isa_irq_to_vector(14)

#define ATA_SECONDARY_CMD 0x170
#define ATA_SECONDARY_CTL 0x376
#define ATA_SECONDARY_IRQ(dev) isa_irq_to_vector(15)

#endif
@@ -1,12 +1,8 @@
#ifndef __ASM_POWERPC_LIBATA_PORTMAP_H
#define __ASM_POWERPC_LIBATA_PORTMAP_H

#define ATA_PRIMARY_CMD 0x1F0
#define ATA_PRIMARY_CTL 0x3F6
#define ATA_PRIMARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 0)

#define ATA_SECONDARY_CMD 0x170
#define ATA_SECONDARY_CTL 0x376
#define ATA_SECONDARY_IRQ(dev) pci_get_legacy_ide_irq(dev, 1)

#endif
@@ -1400,142 +1400,56 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
}
#endif
/*
* ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
* to single msi.
*/
static int ahci_init_msix(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv, unsigned long flags)
static int ahci_get_irq_vector(struct ata_host *host, int port)
{
int nvec, i, rc;
/* Do not init MSI-X if MSI is disabled for the device */
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
nvec = pci_msix_vec_count(pdev);
if (nvec < 0)
return nvec;
/*
* Proper MSI-X implementations will have a vector per-port.
* Barring that, we prefer single-MSI over single-MSIX. If this
* check fails (not enough MSI-X vectors for all ports) we will
* be called again with the flag clear iff ahci_init_msi()
* fails.
*/
if (flags & AHCI_HFLAG_MULTI_MSIX) {
if (nvec < n_ports)
return -ENODEV;
nvec = n_ports;
} else if (nvec) {
nvec = 1;
} else {
/*
* Emit dev_err() since this was the non-legacy irq
* method of last resort.
*/
rc = -ENODEV;
goto fail;
}
for (i = 0; i < nvec; i++)
hpriv->msix[i].entry = i;
rc = pci_enable_msix_exact(pdev, hpriv->msix, nvec);
if (rc < 0)
goto fail;
if (nvec > 1)
hpriv->flags |= AHCI_HFLAG_MULTI_MSIX;
hpriv->irq = hpriv->msix[0].vector; /* for single msi-x */
return nvec;
fail:
dev_err(&pdev->dev,
"failed to enable MSI-X with error %d, # of vectors: %d\n",
rc, nvec);
return rc;
return pci_irq_vector(to_pci_dev(host->dev), port);
}

static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int rc, nvec;
int nvec;
if (hpriv->flags & AHCI_HFLAG_NO_MSI)
return -ENODEV;
nvec = pci_msi_vec_count(pdev);
if (nvec < 0)
return nvec;
/*
* If number of MSIs is less than number of ports then Sharing Last
* Message mode could be enforced. In this case assume that advantage
* of multipe MSIs is negated and use single MSI mode instead.
*/
if (nvec < n_ports)
goto single_msi;
nvec = pci_alloc_irq_vectors(pdev, n_ports, INT_MAX,
PCI_IRQ_MSIX | PCI_IRQ_MSI);
if (nvec > 0) {
if (!(readl(hpriv->mmio + HOST_CTL) & HOST_MRSM)) {
hpriv->get_irq_vector = ahci_get_irq_vector;
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
return nvec;
}
rc = pci_enable_msi_exact(pdev, nvec);
if (rc == -ENOSPC)
goto single_msi;
if (rc < 0)
return rc;
/* fallback to single MSI mode if the controller enforced MRSM mode */
if (readl(hpriv->mmio + HOST_CTL) & HOST_MRSM) {
pci_disable_msi(pdev);
/*
* Fallback to single MSI mode if the controller enforced MRSM
* mode.
*/
printk(KERN_INFO "ahci: MRSM is on, fallback to single MSI\n");
goto single_msi;
pci_free_irq_vectors(pdev);
}
if (nvec > 1)
hpriv->flags |= AHCI_HFLAG_MULTI_MSI;
goto out;
single_msi:
nvec = 1;
rc = pci_enable_msi(pdev);
if (rc < 0)
return rc;
out:
hpriv->irq = pdev->irq;
return nvec;
}

static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
struct ahci_host_priv *hpriv)
{
int nvec;
/*
* -ENOSPC indicated we don't have enough vectors. Don't bother trying
* a single vectors for any other error:
*/
if (nvec < 0 && nvec != -ENOSPC)
return nvec;
/*
* Try to enable per-port MSI-X. If the host is not capable
* fall back to single MSI before finally attempting single
* MSI-X.
* If the host is not capable of supporting per-port vectors, fall
* back to single MSI before finally attempting single MSI-X.
*/
nvec = ahci_init_msix(pdev, n_ports, hpriv, AHCI_HFLAG_MULTI_MSIX);
if (nvec >= 0)
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec == 1)
return nvec;
nvec = ahci_init_msi(pdev, n_ports, hpriv);
if (nvec >= 0)
return nvec;
/* try single-msix */
nvec = ahci_init_msix(pdev, n_ports, hpriv, 0);
if (nvec >= 0)
return nvec;
/* legacy intx interrupts */
pci_intx(pdev, 1);
hpriv->irq = pdev->irq;
return 0;
return pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX);
}

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

@@ -1698,11 +1612,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!host)
return -ENOMEM;
host->private_data = hpriv;
hpriv->msix = devm_kzalloc(&pdev->dev,
sizeof(struct msix_entry) * n_ports, GFP_KERNEL);
if (!hpriv->msix)
return -ENOMEM;
ahci_init_interrupts(pdev, n_ports, hpriv);
if (ahci_init_msi(pdev, n_ports, hpriv) < 0) {
/* legacy intx interrupts */
pci_intx(pdev, 1);
}
hpriv->irq = pdev->irq;
if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
host->flags |= ATA_HOST_PARALLEL_SCAN;
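The ahci.c hunks above drop the driver's hand-rolled MSI-X bookkeeping in favour of pci_alloc_irq_vectors()/pci_irq_vector(). A rough sketch of that allocation pattern for a generic PCI driver is shown below; the function and its error handling are hypothetical, only the PCI IRQ API calls are real.

```c
#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical per-port IRQ setup using the pattern the ahci.c hunk above
 * switches to: request up to one vector per port (MSI-X, MSI, or legacy
 * INTx as fallbacks), then look each vector up with pci_irq_vector()
 * instead of keeping a struct msix_entry array around. */
static int example_setup_irqs(struct pci_dev *pdev, unsigned int n_ports,
			      irq_handler_t handler, void *data)
{
	int nvec, i, rc;

	nvec = pci_alloc_irq_vectors(pdev, 1, n_ports,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), handler, 0,
				 "example-port", data);
		if (rc) {
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), data);
			pci_free_irq_vectors(pdev);
			return rc;
		}
	}
	return nvec;	/* number of vectors actually in use */
}
```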
@@ -242,12 +242,10 @@ enum {
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
#ifdef CONFIG_PCI_MSI
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* per-port MSI(-X) */
#else
/* compile out MSI infrastructure */
AHCI_HFLAG_MULTI_MSI = 0,
AHCI_HFLAG_MULTI_MSIX = 0,
#endif
AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */

@@ -351,7 +349,6 @@ struct ahci_host_priv {
* the PHY position in this array.
*/
struct phy **phys;
struct msix_entry *msix; /* Optional MSI-X support */
unsigned nports; /* Number of ports */
void *plat_data; /* Other platform data */
unsigned int irq; /* interrupt line */

@@ -362,22 +359,11 @@ struct ahci_host_priv {
*/
void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
};
#ifdef CONFIG_PCI_MSI
static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
{
if (hpriv->flags & AHCI_HFLAG_MULTI_MSIX)
return hpriv->msix[port].vector;
else
return hpriv->irq + port;
}
#else
static inline int ahci_irq_vector(struct ahci_host_priv *hpriv, int port)
{
return hpriv->irq;
}
#endif
/* only required for per-port MSI(-X) support */
int (*get_irq_vector)(struct ata_host *host,
int port);
};

extern int ahci_ignore_sss;
@@ -30,24 +30,23 @@
#define PORT_PHY3 0xB0
#define PORT_PHY4 0xB4
#define PORT_PHY5 0xB8
#define PORT_AXICC 0xBC
#define PORT_TRANS 0xC8

/* port register default value */
#define AHCI_PORT_PHY_1_CFG 0xa003fffe
#define AHCI_PORT_TRANS_CFG 0x08000029
#define AHCI_PORT_AXICC_CFG 0x3fffffff

/* for ls1021a */
#define LS1021A_PORT_PHY2 0x28183414
#define LS1021A_PORT_PHY3 0x0e080e06
#define LS1021A_PORT_PHY4 0x064a080b
#define LS1021A_PORT_PHY5 0x2aa86470
#define LS1021A_AXICC_ADDR 0xC0

#define SATA_ECC_DISABLE 0x00020000

/* for ls1043a */
#define LS1043A_PORT_PHY2 0x28184d1f
#define LS1043A_PORT_PHY3 0x0e081509

enum ahci_qoriq_type {
AHCI_LS1021A,
AHCI_LS1043A,

@@ -137,7 +136,7 @@ static struct ata_port_operations ahci_qoriq_ops = {
.hardreset = ahci_qoriq_hardreset,
};

static struct ata_port_info ahci_qoriq_port_info = {
static const struct ata_port_info ahci_qoriq_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NCQ,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,

@@ -162,18 +161,19 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
writel(LS1021A_PORT_PHY4, reg_base + PORT_PHY4);
writel(LS1021A_PORT_PHY5, reg_base + PORT_PHY5);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
writel(AHCI_PORT_AXICC_CFG, reg_base + LS1021A_AXICC_ADDR);
break;

case AHCI_LS1043A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(LS1043A_PORT_PHY2, reg_base + PORT_PHY2);
writel(LS1043A_PORT_PHY3, reg_base + PORT_PHY3);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;

case AHCI_LS2080A:
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC);
break;
}

@@ -221,12 +221,6 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
if (rc)
goto disable_resources;

/* Workaround for ls2080a */
if (qoriq_priv->type == AHCI_LS2080A) {
hpriv->flags |= AHCI_HFLAG_NO_NCQ;
ahci_qoriq_port_info.flags &= ~ATA_FLAG_NCQ;
}

rc = ahci_platform_init_host(pdev, hpriv, &ahci_qoriq_port_info,
&ahci_qoriq_sht);
if (rc)
@@ -147,6 +147,7 @@ static struct scsi_host_template ahci_platform_sht = {

static int st_ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct st_ahci_drv_data *drv_data;
struct ahci_host_priv *hpriv;
int err;

@@ -170,6 +171,9 @@ static int st_ahci_probe(struct platform_device *pdev)
st_ahci_configure_oob(hpriv->mmio);

of_property_read_u32(dev->of_node,
"ports-implemented", &hpriv->force_port_map);

err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
&ahci_platform_sht);
if (err) {
@@ -2520,7 +2520,7 @@ static int ahci_host_activate_multi_irqs(struct ata_host *host,
*/
for (i = 0; i < host->n_ports; i++) {
struct ahci_port_priv *pp = host->ports[i]->private_data;
int irq = ahci_irq_vector(hpriv, i);
int irq = hpriv->get_irq_vector(host, i);
/* Do not receive interrupts sent by dummy ports */
if (!pp) {

@@ -2556,10 +2556,15 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq;
int rc;

if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) {
if (hpriv->irq_handler)
dev_warn(host->dev,
"both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
if (!hpriv->get_irq_vector) {
dev_err(host->dev,
"AHCI_HFLAG_MULTI_MSI requires ->get_irq_vector!\n");
return -EIO;
}

rc = ahci_host_activate_multi_irqs(host, sht);
} else {
@@ -1159,8 +1159,6 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
sdev->use_10_for_rw = 1;
sdev->use_10_for_ms = 1;
sdev->no_report_opcodes = 1;
sdev->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer() callback and
* it needs to see every deferred qc. Set dev_blocked to 1 to

@@ -3282,18 +3280,125 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
return 1;
}

/**
* ata_format_dsm_trim_descr() - SATL Write Same to DSM Trim
* @cmd: SCSI command being translated
* @trmax: Maximum number of entries that will fit in sector_size bytes.
* @sector: Starting sector
* @count: Total Range of request in logical sectors
*
* Rewrite the WRITE SAME descriptor to be a DSM TRIM little-endian formatted
* descriptor.
*
* Upto 64 entries of the format:
* 63:48 Range Length
* 47:0 LBA
*
* Range Length of 0 is ignored.
* LBA's should be sorted order and not overlap.
*
* NOTE: this is the same format as ADD LBA(S) TO NV CACHE PINNED SET
*
* Return: Number of bytes copied into sglist.
*/
static size_t ata_format_dsm_trim_descr(struct scsi_cmnd *cmd, u32 trmax,
u64 sector, u32 count)
{
struct scsi_device *sdp = cmd->device;
size_t len = sdp->sector_size;
size_t r;
__le64 *buf;
u32 i = 0;
unsigned long flags;
WARN_ON(len > ATA_SCSI_RBUF_SIZE);
if (len > ATA_SCSI_RBUF_SIZE)
len = ATA_SCSI_RBUF_SIZE;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
buf = ((void *)ata_scsi_rbuf);
memset(buf, 0, len);
while (i < trmax) {
u64 entry = sector |
((u64)(count > 0xffff ? 0xffff : count) << 48);
buf[i++] = __cpu_to_le64(entry);
if (count <= 0xffff)
break;
count -= 0xffff;
sector += 0xffff;
}
r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
return r;
}

/**
* ata_format_dsm_trim_descr() - SATL Write Same to ATA SCT Write Same
* @cmd: SCSI command being translated
* @lba: Starting sector
* @num: Number of sectors to be zero'd.
*
* Rewrite the WRITE SAME payload to be an SCT Write Same formatted
* descriptor.
* NOTE: Writes a pattern (0's) in the foreground.
*
* Return: Number of bytes copied into sglist.
*/
static size_t ata_format_sct_write_same(struct scsi_cmnd *cmd, u64 lba, u64 num)
{
struct scsi_device *sdp = cmd->device;
size_t len = sdp->sector_size;
size_t r;
u16 *buf;
unsigned long flags;
spin_lock_irqsave(&ata_scsi_rbuf_lock, flags);
buf = ((void *)ata_scsi_rbuf);
put_unaligned_le16(0x0002, &buf[0]); /* SCT_ACT_WRITE_SAME */
put_unaligned_le16(0x0101, &buf[1]); /* WRITE PTRN FG */
put_unaligned_le64(lba, &buf[2]);
put_unaligned_le64(num, &buf[6]);
put_unaligned_le32(0u, &buf[10]); /* pattern */
WARN_ON(len > ATA_SCSI_RBUF_SIZE);
if (len > ATA_SCSI_RBUF_SIZE)
len = ATA_SCSI_RBUF_SIZE;
r = sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), buf, len);
spin_unlock_irqrestore(&ata_scsi_rbuf_lock, flags);
return r;
}

/**
* ata_scsi_write_same_xlat() - SATL Write Same to ATA SCT Write Same
* @qc: Command to be translated
*
* Translate a SCSI WRITE SAME command to be either a DSM TRIM command or
* an SCT Write Same command.
* Based on WRITE SAME has the UNMAP flag
* When set translate to DSM TRIM
* When clear translate to SCT Write Same
*/
static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct scsi_cmnd *scmd = qc->scsicmd;
struct scsi_device *sdp = scmd->device;
size_t len = sdp->sector_size;
struct ata_device *dev = qc->dev;
const u8 *cdb = scmd->cmnd;
u64 block;
u32 n_block;
const u32 trmax = len >> 3;
u32 size;
void *buf;
u16 fp;
u8 bp = 0xff;
u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
if (unlikely(!dev->dma_mode))

@@ -3305,11 +3410,26 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
}
scsi_16_lba_len(cdb, &block, &n_block);
/* for now we only support WRITE SAME with the unmap bit set */
if (unlikely(!(cdb[1] & 0x8))) {
fp = 1;
bp = 3;
goto invalid_fld;
if (unmap) {
/* If trim is not enabled the cmd is invalid. */
if ((dev->horkage & ATA_HORKAGE_NOTRIM) ||
!ata_id_has_trim(dev->id)) {
fp = 1;
bp = 3;
goto invalid_fld;
}
/* If the request is too large the cmd is invalid */
if (n_block > 0xffff * trmax) {
fp = 2;
goto invalid_fld;
}
} else {
/* If write same is not available the cmd is invalid */
if (!ata_id_sct_write_same(dev->id)) {
fp = 1;
bp = 3;
goto invalid_fld;
}
}
/*

@@ -3319,32 +3439,54 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd))
goto invalid_param_len;
buf = page_address(sg_page(scsi_sglist(scmd)));
/*
* size must match sector size in bytes
* For DATA SET MANAGEMENT TRIM in ACS-2 nsect (aka count)
* is defined as number of 512 byte blocks to be transferred.
*/
if (unmap) {
size = ata_format_dsm_trim_descr(scmd, trmax, block, n_block);
if (size != len)
goto invalid_param_len;
if (n_block <= 65535 * ATA_MAX_TRIM_RNUM) {
size = ata_set_lba_range_entries(buf, ATA_MAX_TRIM_RNUM, block, n_block);
if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
/* Newer devices support queued TRIM commands */
tf->protocol = ATA_PROT_NCQ;
tf->command = ATA_CMD_FPDMA_SEND;
tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
tf->nsect = qc->tag << 3;
tf->hob_feature = (size / 512) >> 8;
tf->feature = size / 512;
tf->auxiliary = 1;
} else {
tf->protocol = ATA_PROT_DMA;
tf->hob_feature = 0;
tf->feature = ATA_DSM_TRIM;
tf->hob_nsect = (size / 512) >> 8;
tf->nsect = size / 512;
tf->command = ATA_CMD_DSM;
}
} else {
fp = 2;
goto invalid_fld;
}
size = ata_format_sct_write_same(scmd, block, n_block);
if (size != len)
goto invalid_param_len;
if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
/* Newer devices support queued TRIM commands */
tf->protocol = ATA_PROT_NCQ;
tf->command = ATA_CMD_FPDMA_SEND;
tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
tf->nsect = qc->tag << 3;
tf->hob_feature = (size / 512) >> 8;
tf->feature = size / 512;
tf->auxiliary = 1;
} else {
tf->protocol = ATA_PROT_DMA;
tf->hob_feature = 0;
tf->feature = ATA_DSM_TRIM;
tf->hob_nsect = (size / 512) >> 8;
tf->nsect = size / 512;
tf->command = ATA_CMD_DSM;
tf->feature = 0;
tf->hob_nsect = 0;
tf->nsect = 1;
tf->lbah = 0;
tf->lbam = 0;
tf->lbal = ATA_CMD_STANDBYNOW1;
tf->hob_lbah = 0;
tf->hob_lbam = 0;
tf->hob_lbal = 0;
tf->device = ATA_CMD_STANDBYNOW1;
tf->protocol = ATA_PROT_DMA;
tf->command = ATA_CMD_WRITE_LOG_DMA_EXT;
if (unlikely(dev->flags & ATA_DFLAG_PIO))
tf->command = ATA_CMD_WRITE_LOG_EXT;
}
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |

@@ -3367,6 +3509,76 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
return 1;
}

/**
* ata_scsiop_maint_in - Simulate a subset of MAINTENANCE_IN
* @args: device MAINTENANCE_IN data / SCSI command of interest.
* @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
*
* Yields a subset to satisfy scsi_report_opcode()
*
* LOCKING:
* spin_lock_irqsave(host lock)
*/
static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
{
struct ata_device *dev = args->dev;
u8 *cdb = args->cmd->cmnd;
u8 supported = 0;
unsigned int err = 0;
if (cdb[2] != 1) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
err = 2;
goto out;
}
switch (cdb[3]) {
case INQUIRY:
case MODE_SENSE:
case MODE_SENSE_10:
case READ_CAPACITY:
case SERVICE_ACTION_IN_16:
case REPORT_LUNS:
case REQUEST_SENSE:
case SYNCHRONIZE_CACHE:
case REZERO_UNIT:
case SEEK_6:
case SEEK_10:
case TEST_UNIT_READY:
case SEND_DIAGNOSTIC:
case MAINTENANCE_IN:
case READ_6:
case READ_10:
case READ_16:
case WRITE_6:
case WRITE_10:
case WRITE_16:
case ATA_12:
case ATA_16:
case VERIFY:
case VERIFY_16:
case MODE_SELECT:
case MODE_SELECT_10:
case START_STOP:
supported = 3;
break;
case WRITE_SAME_16:
if (!ata_id_sct_write_same(dev->id))
break;
/* fallthrough: if SCT ... only enable for ZBC */
case ZBC_IN:
case ZBC_OUT:
if (ata_id_zoned_cap(dev->id) ||
dev->class == ATA_DEV_ZAC)
supported = 3;
break;
default:
break;
}
out:
rbuf[1] = supported; /* supported */
return err;
}

/**
* ata_scsi_report_zones_complete - convert ATA output
* @qc: command structure returning the data

@@ -3610,7 +3822,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
{
struct ata_taskfile *tf = &qc->tf;
struct ata_device *dev = qc->dev;
char mpage[CACHE_MPAGE_LEN];
u8 mpage[CACHE_MPAGE_LEN];
u8 wce;
int i;

@@ -3666,7 +3878,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
char mpage[CONTROL_MPAGE_LEN];
u8 mpage[CONTROL_MPAGE_LEN];
u8 d_sense;
int i;

@@ -3701,8 +3913,6 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
dev->flags |= ATA_DFLAG_D_SENSE;
else
dev->flags &= ~ATA_DFLAG_D_SENSE;
qc->scsicmd->result = SAM_STAT_GOOD;
qc->scsicmd->scsi_done(qc->scsicmd);
return 0;
}

@@ -3829,6 +4039,8 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (ata_mselect_control(qc, p, pg_len, &fp) < 0) {
fp += hdr_len + bd_len;
goto invalid_param;
} else {
goto skip; /* No ATA command to send */
}
break;
default: /* invalid page code */

@@ -4147,6 +4359,13 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_invalid_field(dev, cmd, 1);
break;

case MAINTENANCE_IN:
if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_invalid_field(dev, cmd, 1);
break;

/* all other commands */
default:
ata_scsi_set_sense(dev, cmd, ILLEGAL_REQUEST, 0x20, 0x0);

@@ -4179,7 +4398,6 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
shost->max_lun = 1;
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->no_write_same = 1;
/* Schedule policy is determined by ->qc_defer()
* callback and it needs to see every deferred qc.
@@ -347,10 +347,8 @@ static int at91sam9_smc_fields_init(struct device *dev)
field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC);
fields.mode = devm_regmap_field_alloc(dev, smc, field);
if (IS_ERR(fields.mode))
return PTR_ERR(fields.mode);

return 0;
return PTR_ERR_OR_ZERO(fields.mode);
}

static int pata_at91_probe(struct platform_device *pdev)
@@ -152,8 +152,7 @@ static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
div = 8;
T = (int)((1000000000000LL * div) / octeon_get_io_clock_rate());
if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
BUG();
BUG_ON(ata_timing_compute(dev, dev->pio_mode, &timing, T, T));
t1 = timing.setup;
if (t1)
@@ -1727,15 +1727,13 @@ static int mv_port_start(struct ata_port *ap)
return -ENOMEM;
ap->private_data = pp;

pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
if (!pp->crqb)
return -ENOMEM;
memset(pp->crqb, 0, MV_CRQB_Q_SZ);

pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
if (!pp->crpb)
goto out_port_free_dma_mem;
memset(pp->crpb, 0, MV_CRPB_Q_SZ);

/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
@@ -1,7 +0,0 @@
#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
#define __ASM_GENERIC_LIBATA_PORTMAP_H

#define ATA_PRIMARY_IRQ(dev) 14
#define ATA_SECONDARY_IRQ(dev) 15

#endif
@@ -105,6 +105,7 @@ enum {
ATA_ID_CFA_KEY_MGMT = 162,
ATA_ID_CFA_MODES = 163,
ATA_ID_DATA_SET_MGMT = 169,
ATA_ID_SCT_CMD_XPORT = 206,
ATA_ID_ROT_SPEED = 217,
ATA_ID_PIO4 = (1 << 1),

@@ -788,6 +789,48 @@ static inline bool ata_id_sense_reporting_enabled(const u16 *id)
return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
}

/**
*
* Word: 206 - SCT Command Transport
* 15:12 - Vendor Specific
* 11:6 - Reserved
* 5 - SCT Command Transport Data Tables supported
* 4 - SCT Command Transport Features Control supported
* 3 - SCT Command Transport Error Recovery Control supported
* 2 - SCT Command Transport Write Same supported
* 1 - SCT Command Transport Long Sector Access supported
* 0 - SCT Command Transport supported
*/
static inline bool ata_id_sct_data_tables(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 5) ? true : false;
}

static inline bool ata_id_sct_features_ctrl(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 4) ? true : false;
}

static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
}

static inline bool ata_id_sct_write_same(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
}

static inline bool ata_id_sct_long_sector_access(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
}

static inline bool ata_id_sct_supported(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 0) ? true : false;
}

/**
* ata_id_major_version - get ATA level of drive
* @id: Identify data

@@ -1071,32 +1114,6 @@ static inline void ata_id_to_hd_driveid(u16 *id)
#endif
}

/*
* Write LBA Range Entries to the buffer that will cover the extent from
* sector to sector + count. This is used for TRIM and for ADD LBA(S)
* TO NV CACHE PINNED SET.
*/
static inline unsigned ata_set_lba_range_entries(void *_buffer,
unsigned num, u64 sector, unsigned long count)
{
__le64 *buffer = _buffer;
unsigned i = 0, used_bytes;
while (i < num) {
u64 entry = sector |
((u64)(count > 0xffff ? 0xffff : count) << 48);
buffer[i++] = __cpu_to_le64(entry);
if (count <= 0xffff)
break;
count -= 0xffff;
sector += 0xffff;
}
used_bytes = ALIGN(i * 8, 512);
memset(buffer + i, 0, used_bytes - i * 8);
return used_bytes;
}

static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
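The ata.h hunk above decodes IDENTIFY DEVICE word 206 (SCT Command Transport) through small predicate helpers, which the libata-scsi.c changes use to gate the new WRITE SAME translation. A hedged sketch of such a capability check follows; the wrapper function is hypothetical, and checking both the transport bit and the Write Same action bit is stricter than what the diff itself requires.

```c
#include <linux/ata.h>
#include <linux/libata.h>

/* Hypothetical probe-time check: only offer the non-UNMAP WRITE SAME path
 * when the drive reports both SCT Command Transport (word 206 bit 0) and
 * its Write Same action (bit 2), using the ata_id_sct_* helpers added
 * above. */
static bool example_supports_sct_write_same(const struct ata_device *dev)
{
	return ata_id_sct_supported(dev->id) &&
	       ata_id_sct_write_same(dev->id);
}
```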
@@ -46,7 +46,8 @@
#ifdef CONFIG_ATA_NONSTANDARD
#include <asm/libata-portmap.h>
#else
#include <asm-generic/libata-portmap.h>
#define ATA_PRIMARY_IRQ(dev) 14
#define ATA_SECONDARY_IRQ(dev) 15
#endif

/*