Merge branch 'for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata
Pull libata updates from Tejun Heo:

 - ahci grew runtime power management support so that the controller can
   be turned off if no devices are attached.

 - sata_via isn't dead yet. It got hotplug support and more refined
   workaround for certain WD drives.

 - Misc cleanups.

There's a merge from for-4.5-fixes to avoid confusing conflicts in ahci
PCI ID table.

* 'for-4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
  ata: ahci_xgene: dereferencing uninitialized pointer in probe
  AHCI: Remove obsolete Intel Lewisburg SATA RAID device IDs
  ata: sata_rcar: Use ARCH_RENESAS
  sata_via: Implement hotplug for VT6421
  sata_via: Apply WD workaround only when needed on VT6421
  ahci: Add runtime PM support for the host controller
  ahci: Add functions to manage runtime PM of AHCI ports
  ahci: Convert driver to use modern PM hooks
  ahci: Cache host controller version
  scsi: Drop runtime PM usage count after host is added
  scsi: Set request queue runtime PM status back to active on resume
  block: Add blk_set_runtime_active()
  ata: ahci_mvebu: add support for Armada 3700 variant
  libata: fix unbalanced spin_lock_irqsave/spin_unlock_irq() in ata_scsi_park_show()
  libata: support AHCI on OCTEON platform
commit fcab86add7
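The ahci changes below wire the PCI driver into the dev_pm_ops framework: runtime suspend/resume callbacks are registered with SET_RUNTIME_PM_OPS(), probe drops the usage count so an idle controller may autosuspend, and remove takes it back. As orientation before the diff, here is a minimal sketch of that pattern for a hypothetical PCI driver; the foo_* names are placeholders and not symbols from this series, and the ID table is omitted.

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
    /* quiesce the controller, e.g. mask its interrupts */
    return 0;
}

static int foo_runtime_resume(struct device *dev)
{
    /* re-initialize the controller after it was powered down */
    return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
    SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    /* ... normal controller setup ... */

    /* drop the usage count so the device may runtime suspend when idle */
    pm_runtime_put_noidle(&pdev->dev);
    return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
    /* balance the put from probe before tearing the device down */
    pm_runtime_get_noresume(&pdev->dev);
}

static struct pci_driver foo_driver = {
    .name   = "foo",
    .probe  = foo_probe,
    .remove = foo_remove,
    .driver = {
        .pm = &foo_pm_ops,
    },
};
module_pci_driver(foo_driver);
MODULE_LICENSE("GPL");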
@@ -11,6 +11,7 @@ Required properties:
- compatible : compatible string, one of:
  - "allwinner,sun4i-a10-ahci"
  - "hisilicon,hisi-ahci"
  - "cavium,octeon-7130-ahci"
  - "ibm,476gtr-ahci"
  - "marvell,armada-380-ahci"
  - "snps,dwc-ahci"
@@ -0,0 +1,42 @@
* UCTL SATA controller glue

UCTL is the bridge unit between the I/O interconnect (an internal bus)
and the SATA AHCI host controller (UAHC). It performs the following functions:
 - provides interfaces for the applications to access the UAHC AHCI
   registers on the CN71XX I/O space.
 - provides a bridge for UAHC to fetch AHCI command table entries and data
   buffers from Level 2 Cache.
 - posts interrupts to the CIU.
 - contains registers that:
   - control the behavior of the UAHC
   - control the clock/reset generation to UAHC
   - control endian swapping for all UAHC registers and DMA accesses

Properties:

- compatible: "cavium,octeon-7130-sata-uctl"

  Compatibility with the cn7130 SOC.

- reg: The base address of the UCTL register bank.

- #address-cells, #size-cells, ranges and dma-ranges must be present and hold
  suitable values to map all child nodes.

Example:

    uctl@118006c000000 {
        compatible = "cavium,octeon-7130-sata-uctl";
        reg = <0x11800 0x6c000000 0x0 0x100>;
        ranges; /* Direct mapping */
        dma-ranges;
        #address-cells = <2>;
        #size-cells = <2>;

        sata: sata@16c0000000000 {
            compatible = "cavium,octeon-7130-ahci";
            reg = <0x16c00 0x00000000 0x0 0x200>;
            interrupt-parent = <&cibsata>;
            interrupts = <2 4>; /* Bit: 2, level */
        };
    };
@@ -275,6 +275,11 @@ static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
    cvmx_read64(CVMX_MIO_BOOT_BIST_STAT);
}

static inline void cvmx_writeq_csr(void __iomem *csr_addr, uint64_t val)
{
    cvmx_write_csr((__force uint64_t)csr_addr, val);
}

static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
    cvmx_write64(io_addr, val);

@@ -287,6 +292,10 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
    return val;
}

static inline uint64_t cvmx_readq_csr(void __iomem *csr_addr)
{
    return cvmx_read_csr((__force uint64_t) csr_addr);
}

static inline void cvmx_send_single(uint64_t data)
{
@@ -3529,6 +3529,30 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
    spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_post_runtime_resume);

/**
 * blk_set_runtime_active - Force runtime status of the queue to be active
 * @q: the queue of the device
 *
 * If the device is left runtime suspended during system suspend the resume
 * hook typically resumes the device and corrects runtime status
 * accordingly. However, that does not affect the queue runtime PM status
 * which is still "suspended". This prevents processing requests from the
 * queue.
 *
 * This function can be used in driver's resume hook to correct queue
 * runtime PM status and re-enable peeking requests from the queue. It
 * should be called before first request is added to the queue.
 */
void blk_set_runtime_active(struct request_queue *q)
{
    spin_lock_irq(q->queue_lock);
    q->rpm_status = RPM_ACTIVE;
    pm_runtime_mark_last_busy(q->dev);
    pm_request_autosuspend(q->dev);
    spin_unlock_irq(q->queue_lock);
}
EXPORT_SYMBOL(blk_set_runtime_active);
#endif

int __init blk_dev_init(void)
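The kernel-doc above says the helper is meant for a driver's system-resume path when the device was left runtime suspended across suspend; the scsi_pm.c hunk later in this commit does exactly that for SCSI devices. A minimal sketch of that usage follows; "foo" and foo_resume() are placeholders for illustration, not symbols from this commit.

#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>

struct foo {
    struct request_queue *queue;
};

/* hypothetical system-resume hook of a block device driver */
static int foo_resume(struct device *dev)
{
    struct foo *f = dev_get_drvdata(dev);

    /*
     * The device was left runtime suspended across system suspend, so
     * the queue's runtime PM status is still "suspended"; flip it back
     * to active before any request is added to the queue.
     */
    if (pm_runtime_suspended(dev))
        blk_set_runtime_active(f->queue);

    return 0;
}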
@@ -151,6 +151,15 @@ config AHCI_MVEBU

	  If unsure, say N.

config AHCI_OCTEON
	tristate "Cavium Octeon Soc Serial ATA"
	depends on SATA_AHCI_PLATFORM && CAVIUM_OCTEON_SOC
	default y
	help
	  This option enables support for Cavium Octeon SoC Serial ATA.

	  If unsure, say N.

config AHCI_SUNXI
	tristate "Allwinner sunxi AHCI SATA support"
	depends on ARCH_SUNXI

@@ -355,7 +364,7 @@ config SATA_PROMISE

config SATA_RCAR
	tristate "Renesas R-Car SATA support"
	depends on ARCH_SHMOBILE || COMPILE_TEST
	depends on ARCH_RENESAS || COMPILE_TEST
	help
	  This option enables support for Renesas R-Car Serial ATA.

@@ -15,6 +15,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850)	+= ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX)		+= ahci_imx.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU)	+= ahci_mvebu.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_OCTEON)	+= ahci_octeon.o
obj-$(CONFIG_AHCI_SUNXI)	+= ahci_sunxi.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_ST)		+= ahci_st.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_TEGRA)	+= ahci_tegra.o libahci.o libahci_platform.o
@@ -85,6 +85,7 @@ enum board_ids {
};

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,

@@ -94,9 +95,13 @@ static bool is_mcp89_apple(struct pci_dev *pdev);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
static int ahci_pci_device_runtime_suspend(struct device *dev);
static int ahci_pci_device_runtime_resume(struct device *dev);
#ifdef CONFIG_PM_SLEEP
static int ahci_pci_device_suspend(struct device *dev);
static int ahci_pci_device_resume(struct device *dev);
#endif
#endif /* CONFIG_PM */

static struct scsi_host_template ahci_sht = {
    AHCI_SHT("ahci"),

@@ -371,15 +376,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
    { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
    { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
    { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
    { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/

@@ -563,16 +564,20 @@ static const struct pci_device_id ahci_pci_tbl[] = {
    { } /* terminate list */
};

static const struct dev_pm_ops ahci_pci_pm_ops = {
    SET_SYSTEM_SLEEP_PM_OPS(ahci_pci_device_suspend, ahci_pci_device_resume)
    SET_RUNTIME_PM_OPS(ahci_pci_device_runtime_suspend,
                       ahci_pci_device_runtime_resume, NULL)
};

static struct pci_driver ahci_pci_driver = {
    .name = DRV_NAME,
    .id_table = ahci_pci_tbl,
    .probe = ahci_init_one,
    .remove = ata_pci_remove_one,
#ifdef CONFIG_PM
    .suspend = ahci_pci_device_suspend,
    .resume = ahci_pci_device_resume,
#endif
    .remove = ahci_remove_one,
    .driver = {
        .pm = &ahci_pci_pm_ops,
    },
};

#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)

@@ -801,43 +806,67 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,


#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
static void ahci_pci_disable_interrupts(struct ata_host *host)
{
    struct ata_host *host = pci_get_drvdata(pdev);
    struct ahci_host_priv *hpriv = host->private_data;
    void __iomem *mmio = hpriv->mmio;
    u32 ctl;

    if (mesg.event & PM_EVENT_SUSPEND &&
        hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
    /* AHCI spec rev1.1 section 8.3.3:
     * Software must disable interrupts prior to requesting a
     * transition of the HBA to D3 state.
     */
    ctl = readl(mmio + HOST_CTL);
    ctl &= ~HOST_IRQ_EN;
    writel(ctl, mmio + HOST_CTL);
    readl(mmio + HOST_CTL); /* flush */
}

static int ahci_pci_device_runtime_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct ata_host *host = pci_get_drvdata(pdev);

    ahci_pci_disable_interrupts(host);
    return 0;
}

static int ahci_pci_device_runtime_resume(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct ata_host *host = pci_get_drvdata(pdev);
    int rc;

    rc = ahci_pci_reset_controller(host);
    if (rc)
        return rc;
    ahci_pci_init_controller(host);
    return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ahci_pci_device_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct ata_host *host = pci_get_drvdata(pdev);
    struct ahci_host_priv *hpriv = host->private_data;

    if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
        dev_err(&pdev->dev,
                "BIOS update required for suspend/resume\n");
        return -EIO;
    }

    if (mesg.event & PM_EVENT_SLEEP) {
        /* AHCI spec rev1.1 section 8.3.3:
         * Software must disable interrupts prior to requesting a
         * transition of the HBA to D3 state.
         */
        ctl = readl(mmio + HOST_CTL);
        ctl &= ~HOST_IRQ_EN;
        writel(ctl, mmio + HOST_CTL);
        readl(mmio + HOST_CTL); /* flush */
    }

    return ata_pci_device_suspend(pdev, mesg);
    ahci_pci_disable_interrupts(host);
    return ata_host_suspend(host, PMSG_SUSPEND);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
static int ahci_pci_device_resume(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct ata_host *host = pci_get_drvdata(pdev);
    int rc;

    rc = ata_pci_device_do_resume(pdev);
    if (rc)
        return rc;

    /* Apple BIOS helpfully mangles the registers on resume */
    if (is_mcp89_apple(pdev))
        ahci_mcp89_apple_enable(pdev);

@@ -856,6 +885,8 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
}
#endif

#endif /* CONFIG_PM */

static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
    int rc;

@@ -1718,7 +1749,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

    pci_set_master(pdev);

    return ahci_host_activate(host, &ahci_sht);
    rc = ahci_host_activate(host, &ahci_sht);
    if (rc)
        return rc;

    pm_runtime_put_noidle(&pdev->dev);
    return 0;
}

static void ahci_remove_one(struct pci_dev *pdev)
{
    pm_runtime_get_noresume(&pdev->dev);
    ata_pci_remove_one(pdev);
}

module_pci_driver(ahci_pci_driver);
@@ -335,6 +335,7 @@ struct ahci_host_priv {
    void __iomem *mmio;     /* bus-independent mem map */
    u32 cap;                /* cap to use */
    u32 cap2;               /* cap2 to use */
    u32 version;            /* cached version */
    u32 port_map;           /* port map to use */
    u32 saved_cap;          /* saved initial cap */
    u32 saved_cap2;         /* saved initial cap2 */
@@ -112,12 +112,15 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
    if (rc)
        return rc;

    dram = mv_mbus_dram_info();
    if (!dram)
        return -ENODEV;
    if (of_device_is_compatible(pdev->dev.of_node,
                                "marvell,armada-380-ahci")) {
        dram = mv_mbus_dram_info();
        if (!dram)
            return -ENODEV;

    ahci_mvebu_mbus_config(hpriv, dram);
    ahci_mvebu_regret_option(hpriv);
        ahci_mvebu_mbus_config(hpriv, dram);
        ahci_mvebu_regret_option(hpriv);
    }

    rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
                                 &ahci_platform_sht);

@@ -133,6 +136,7 @@ static int ahci_mvebu_probe(struct platform_device *pdev)

static const struct of_device_id ahci_mvebu_of_match[] = {
    { .compatible = "marvell,armada-380-ahci", },
    { .compatible = "marvell,armada-3700-ahci", },
    { },
};
MODULE_DEVICE_TABLE(of, ahci_mvebu_of_match);
@@ -0,0 +1,105 @@
/*
 * SATA glue for Cavium Octeon III SOCs.
 *
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2010-2015 Cavium Networks
 *
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>

#include <asm/octeon/octeon.h>
#include <asm/bitfield.h>

#define CVMX_SATA_UCTL_SHIM_CFG		0xE8

#define SATA_UCTL_ENDIAN_MODE_BIG	1
#define SATA_UCTL_ENDIAN_MODE_LITTLE	0
#define SATA_UCTL_ENDIAN_MODE_MASK	3

#define SATA_UCTL_DMA_ENDIAN_MODE_SHIFT	8
#define SATA_UCTL_CSR_ENDIAN_MODE_SHIFT	0
#define SATA_UCTL_DMA_READ_CMD_SHIFT	12

static int ahci_octeon_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct device_node *node = dev->of_node;
    struct resource *res;
    void __iomem *base;
    u64 cfg;
    int ret;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
        dev_err(&pdev->dev, "Platform resource[0] is missing\n");
        return -ENODEV;
    }

    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
        return PTR_ERR(base);

    cfg = cvmx_readq_csr(base + CVMX_SATA_UCTL_SHIM_CFG);

    cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT);
    cfg &= ~(SATA_UCTL_ENDIAN_MODE_MASK << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT);

#ifdef __BIG_ENDIAN
    cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
    cfg |= SATA_UCTL_ENDIAN_MODE_BIG << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
#else
    cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_DMA_ENDIAN_MODE_SHIFT;
    cfg |= SATA_UCTL_ENDIAN_MODE_LITTLE << SATA_UCTL_CSR_ENDIAN_MODE_SHIFT;
#endif

    cfg |= 1 << SATA_UCTL_DMA_READ_CMD_SHIFT;

    cvmx_writeq_csr(base + CVMX_SATA_UCTL_SHIM_CFG, cfg);

    if (!node) {
        dev_err(dev, "no device node, failed to add octeon sata\n");
        return -ENODEV;
    }

    ret = of_platform_populate(node, NULL, NULL, dev);
    if (ret) {
        dev_err(dev, "failed to add ahci-platform core\n");
        return ret;
    }

    return 0;
}

static int ahci_octeon_remove(struct platform_device *pdev)
{
    return 0;
}

static const struct of_device_id octeon_ahci_match[] = {
    { .compatible = "cavium,octeon-7130-sata-uctl", },
    {},
};
MODULE_DEVICE_TABLE(of, octeon_ahci_match);

static struct platform_driver ahci_octeon_driver = {
    .probe          = ahci_octeon_probe,
    .remove         = ahci_octeon_remove,
    .driver         = {
        .name   = "octeon-ahci",
        .of_match_table = octeon_ahci_match,
    },
};

module_platform_driver(ahci_octeon_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
MODULE_DESCRIPTION("Cavium Inc. sata config.");
@@ -76,6 +76,7 @@ static const struct of_device_id ahci_of_match[] = {
    { .compatible = "ibm,476gtr-ahci", },
    { .compatible = "snps,dwc-ahci", },
    { .compatible = "hisilicon,hisi-ahci", },
    { .compatible = "cavium,octeon-7130-ahci", },
    {},
};
MODULE_DEVICE_TABLE(of, ahci_of_match);
@@ -821,9 +821,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
            dev_warn(&pdev->dev, "%s: Error reading device info. Assume version1\n",
                     __func__);
            version = XGENE_AHCI_V1;
        }
        if (info->valid & ACPI_VALID_CID)
        } else if (info->valid & ACPI_VALID_CID) {
            version = XGENE_AHCI_V2;
        }
    }
}
#endif
@@ -225,6 +225,31 @@ static void ahci_enable_ahci(void __iomem *mmio)
    WARN_ON(1);
}

/**
 * ahci_rpm_get_port - Make sure the port is powered on
 * @ap: Port to power on
 *
 * Whenever there is need to access the AHCI host registers outside of
 * normal execution paths, call this function to make sure the host is
 * actually powered on.
 */
static int ahci_rpm_get_port(struct ata_port *ap)
{
    return pm_runtime_get_sync(ap->dev);
}

/**
 * ahci_rpm_put_port - Undoes ahci_rpm_get_port()
 * @ap: Port to power down
 *
 * Undoes ahci_rpm_get_port() and possibly powers down the AHCI host
 * if it has no more active users.
 */
static void ahci_rpm_put_port(struct ata_port *ap)
{
    pm_runtime_put(ap->dev);
}

static ssize_t ahci_show_host_caps(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{

@@ -251,9 +276,8 @@ static ssize_t ahci_show_host_version(struct device *dev,
    struct Scsi_Host *shost = class_to_shost(dev);
    struct ata_port *ap = ata_shost_to_port(shost);
    struct ahci_host_priv *hpriv = ap->host->private_data;
    void __iomem *mmio = hpriv->mmio;

    return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
    return sprintf(buf, "%x\n", hpriv->version);
}

static ssize_t ahci_show_port_cmd(struct device *dev,

@@ -262,8 +286,13 @@ static ssize_t ahci_show_port_cmd(struct device *dev,
    struct Scsi_Host *shost = class_to_shost(dev);
    struct ata_port *ap = ata_shost_to_port(shost);
    void __iomem *port_mmio = ahci_port_base(ap);
    ssize_t ret;

    return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
    ahci_rpm_get_port(ap);
    ret = sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
    ahci_rpm_put_port(ap);

    return ret;
}

static ssize_t ahci_read_em_buffer(struct device *dev,

@@ -279,17 +308,20 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
    size_t count;
    int i;

    ahci_rpm_get_port(ap);
    spin_lock_irqsave(ap->lock, flags);

    em_ctl = readl(mmio + HOST_EM_CTL);
    if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
        !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
        spin_unlock_irqrestore(ap->lock, flags);
        ahci_rpm_put_port(ap);
        return -EINVAL;
    }

    if (!(em_ctl & EM_CTL_MR)) {
        spin_unlock_irqrestore(ap->lock, flags);
        ahci_rpm_put_port(ap);
        return -EAGAIN;
    }

@@ -317,6 +349,7 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
    }

    spin_unlock_irqrestore(ap->lock, flags);
    ahci_rpm_put_port(ap);

    return i;
}

@@ -341,11 +374,13 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
        size % 4 || size > hpriv->em_buf_sz)
        return -EINVAL;

    ahci_rpm_get_port(ap);
    spin_lock_irqsave(ap->lock, flags);

    em_ctl = readl(mmio + HOST_EM_CTL);
    if (em_ctl & EM_CTL_TM) {
        spin_unlock_irqrestore(ap->lock, flags);
        ahci_rpm_put_port(ap);
        return -EBUSY;
    }

@@ -358,6 +393,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
    writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

    spin_unlock_irqrestore(ap->lock, flags);
    ahci_rpm_put_port(ap);

    return size;
}

@@ -371,7 +407,9 @@ static ssize_t ahci_show_em_supported(struct device *dev,
    void __iomem *mmio = hpriv->mmio;
    u32 em_ctl;

    ahci_rpm_get_port(ap);
    em_ctl = readl(mmio + HOST_EM_CTL);
    ahci_rpm_put_port(ap);

    return sprintf(buf, "%s%s%s%s\n",
                   em_ctl & EM_CTL_LED ? "led " : "",

@@ -509,6 +547,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
    /* record values to use during operation */
    hpriv->cap = cap;
    hpriv->cap2 = cap2;
    hpriv->version = readl(mmio + HOST_VERSION);
    hpriv->port_map = port_map;

    if (!hpriv->start_engine)

@@ -1014,6 +1053,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
    else
        return -EINVAL;

    ahci_rpm_get_port(ap);
    spin_lock_irqsave(ap->lock, flags);

    /*

@@ -1023,6 +1063,7 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
    em_ctl = readl(mmio + HOST_EM_CTL);
    if (em_ctl & EM_CTL_TM) {
        spin_unlock_irqrestore(ap->lock, flags);
        ahci_rpm_put_port(ap);
        return -EBUSY;
    }

@@ -1050,6 +1091,8 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
    emp->led_state = state;

    spin_unlock_irqrestore(ap->lock, flags);
    ahci_rpm_put_port(ap);

    return size;
}

@@ -2215,6 +2258,8 @@ static void ahci_pmp_detach(struct ata_port *ap)

int ahci_port_resume(struct ata_port *ap)
{
    ahci_rpm_get_port(ap);

    ahci_power_up(ap);
    ahci_start_port(ap);

@@ -2241,6 +2286,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
        ata_port_freeze(ap);
    }

    ahci_rpm_put_port(ap);
    return rc;
}
#endif

@@ -2356,11 +2402,10 @@ static void ahci_port_stop(struct ata_port *ap)
void ahci_print_info(struct ata_host *host, const char *scc_s)
{
    struct ahci_host_priv *hpriv = host->private_data;
    void __iomem *mmio = hpriv->mmio;
    u32 vers, cap, cap2, impl, speed;
    const char *speed_s;

    vers = readl(mmio + HOST_VERSION);
    vers = hpriv->version;
    cap = hpriv->cap;
    cap2 = hpriv->cap2;
    impl = hpriv->port_map;
@@ -174,13 +174,13 @@ static ssize_t ata_scsi_park_show(struct device *device,
    struct ata_port *ap;
    struct ata_link *link;
    struct ata_device *dev;
    unsigned long flags, now;
    unsigned long now;
    unsigned int uninitialized_var(msecs);
    int rc = 0;

    ap = ata_shost_to_port(sdev->host);

    spin_lock_irqsave(ap->lock, flags);
    spin_lock_irq(ap->lock);
    dev = ata_scsi_find_dev(ap, sdev);
    if (!dev) {
        rc = -ENODEV;
@@ -61,6 +61,7 @@ enum {
    SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
    SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
    SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
    SVIA_MISC_3		= 0x46, /* Miscellaneous Control III */
    PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/ cable detect */
    PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

@@ -71,9 +72,18 @@ enum {
    NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

    SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

    SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug */
};

struct svia_priv {
    bool			wd_workaround;
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);

@@ -85,6 +95,7 @@ static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);

static const struct pci_device_id svia_pci_tbl[] = {
    { PCI_VDEVICE(VIA, 0x5337), vt6420 },

@@ -105,7 +116,7 @@ static struct pci_driver svia_pci_driver = {
    .probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
    .suspend		= ata_pci_device_suspend,
    .resume			= ata_pci_device_resume,
    .resume			= svia_pci_device_resume,
#endif
    .remove			= ata_pci_remove_one,
};

@@ -137,6 +148,7 @@ static struct ata_port_operations vt6421_sata_ops = {
    .inherits		= &svia_base_ops,
    .scr_read		= svia_scr_read,
    .scr_write		= svia_scr_write,
    .error_handler		= vt6421_error_handler,
};

static struct ata_port_operations vt8251_ops = {

@@ -536,7 +548,67 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
    return 0;
}

static void svia_configure(struct pci_dev *pdev, int board_id)
static void svia_wd_fix(struct pci_dev *pdev)
{
    u8 tmp8;

    pci_read_config_byte(pdev, 0x52, &tmp8);
    pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
}

static irqreturn_t vt6421_interrupt(int irq, void *dev_instance)
{
    struct ata_host *host = dev_instance;
    irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);

    /* if the IRQ was not handled, it might be a hotplug IRQ */
    if (rc != IRQ_HANDLED) {
        u32 serror;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        /* check for hotplug on port 0 */
        svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
        if (serror & SERR_PHYRDY_CHG) {
            ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
            ata_port_freeze(host->ports[0]);
            rc = IRQ_HANDLED;
        }
        /* check for hotplug on port 1 */
        svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
        if (serror & SERR_PHYRDY_CHG) {
            ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
            ata_port_freeze(host->ports[1]);
            rc = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&host->lock, flags);
    }

    return rc;
}

static void vt6421_error_handler(struct ata_port *ap)
{
    struct svia_priv *hpriv = ap->host->private_data;
    struct pci_dev *pdev = to_pci_dev(ap->host->dev);
    u32 serror;

    /* see svia_configure() for description */
    if (!hpriv->wd_workaround) {
        svia_scr_read(&ap->link, SCR_ERROR, &serror);
        if (serror == 0x1000500) {
            ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
            svia_wd_fix(pdev);
            hpriv->wd_workaround = true;
            ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
        }
    }

    ata_sff_error_handler(ap);
}

static void svia_configure(struct pci_dev *pdev, int board_id,
                           struct svia_priv *hpriv)
{
    u8 tmp8;

@@ -572,6 +644,16 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
        pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
    }

    /* enable IRQ on hotplug */
    pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
    if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
        dev_dbg(&pdev->dev,
            "enabling SATA hotplug (0x%x)\n",
            (int) tmp8);
        tmp8 |= SATA_HOTPLUG;
        pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
    }

    /*
     * vt6420/1 has problems talking to some drives.  The following
     * is the fix from Joseph Chan <JosephChan@via.com.tw>.

@@ -593,11 +675,15 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
     * https://bugzilla.kernel.org/show_bug.cgi?id=15173
     * http://article.gmane.org/gmane.linux.ide/46352
     * http://thread.gmane.org/gmane.linux.kernel/1062139
     *
     * As the fix slows down data transfer, apply it only if the error
     * actually appears - see vt6421_error_handler()
     * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
     * read safely.
     */
    if (board_id == vt6420 || board_id == vt6421) {
        pci_read_config_byte(pdev, 0x52, &tmp8);
        tmp8 |= 1 << 2;
        pci_write_config_byte(pdev, 0x52, tmp8);
    if (board_id == vt6420) {
        svia_wd_fix(pdev);
        hpriv->wd_workaround = true;
    }
    }

@@ -608,6 +694,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
    struct ata_host *host = NULL;
    int board_id = (int) ent->driver_data;
    const unsigned *bar_sizes;
    struct svia_priv *hpriv;

    ata_print_version_once(&pdev->dev, DRV_VERSION);

@@ -647,11 +734,39 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
    if (rc)
        return rc;

    svia_configure(pdev, board_id);
    hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
    if (!hpriv)
        return -ENOMEM;
    host->private_data = hpriv;

    svia_configure(pdev, board_id, hpriv);

    pci_set_master(pdev);
    return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
                             IRQF_SHARED, &svia_sht);
    if (board_id == vt6421)
        return ata_host_activate(host, pdev->irq, vt6421_interrupt,
                                 IRQF_SHARED, &svia_sht);
    else
        return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
                                 IRQF_SHARED, &svia_sht);
}

#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev)
{
    struct ata_host *host = pci_get_drvdata(pdev);
    struct svia_priv *hpriv = host->private_data;
    int rc;

    rc = ata_pci_device_do_resume(pdev);
    if (rc)
        return rc;

    if (hpriv->wd_workaround)
        svia_wd_fix(pdev);
    ata_host_resume(host);

    return 0;
}
#endif

module_pci_driver(svia_pci_driver);
@@ -250,6 +250,12 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
    if (error)
        goto out_destroy_freelist;

    /*
     * Increase usage count temporarily here so that calling
     * scsi_autopm_put_host() will trigger runtime idle if there is
     * nothing else preventing suspending the device.
     */
    pm_runtime_get_noresume(&shost->shost_gendev);
    pm_runtime_set_active(&shost->shost_gendev);
    pm_runtime_enable(&shost->shost_gendev);
    device_enable_async_suspend(&shost->shost_gendev);

@@ -290,6 +296,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
        goto out_destroy_host;

    scsi_proc_host_add(shost);
    scsi_autopm_put_host(shost);
    return error;

 out_destroy_host:
@@ -139,6 +139,16 @@ static int scsi_bus_resume_common(struct device *dev,
    else
        fn = NULL;

    /*
     * Forcibly set runtime PM status of request queue to "active" to
     * make sure we can again get requests from the queue (see also
     * blk_pm_peek_request()).
     *
     * The resume hook will correct runtime PM status of the disk.
     */
    if (scsi_is_sdev_device(dev) && pm_runtime_suspended(dev))
        blk_set_runtime_active(to_scsi_device(dev)->request_queue);

    if (fn) {
        async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
@@ -1029,6 +1029,7 @@ extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
                                       struct device *dev) {}

@@ -1039,6 +1040,7 @@ static inline int blk_pre_runtime_suspend(struct request_queue *q)
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
extern inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*