Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpica/acglobal.h | 6
-rw-r--r-- drivers/acpi/acpica/acmacros.h | 10
-rw-r--r-- drivers/acpi/acpica/acpredef.h | 1
-rw-r--r-- drivers/acpi/acpica/dsopcode.c | 4
-rw-r--r-- drivers/acpi/acpica/evregion.c | 4
-rw-r--r-- drivers/acpi/acpica/evxface.c | 1
-rw-r--r-- drivers/acpi/acpica/evxfevnt.c | 1
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 1
-rw-r--r-- drivers/acpi/acpica/evxfregn.c | 1
-rw-r--r-- drivers/acpi/acpica/exdump.c | 4
-rw-r--r-- drivers/acpi/acpica/exfldio.c | 10
-rw-r--r-- drivers/acpi/acpica/exregion.c | 8
-rw-r--r-- drivers/acpi/acpica/hwregs.c | 11
-rw-r--r-- drivers/acpi/acpica/hwsleep.c | 1
-rw-r--r-- drivers/acpi/acpica/hwtimer.c | 1
-rw-r--r-- drivers/acpi/acpica/hwvalid.c | 16
-rw-r--r-- drivers/acpi/acpica/hwxface.c | 1
-rw-r--r-- drivers/acpi/acpica/nsdump.c | 12
-rw-r--r-- drivers/acpi/acpica/nspredef.c | 18
-rw-r--r-- drivers/acpi/acpica/nsxfeval.c | 1
-rw-r--r-- drivers/acpi/acpica/nsxfname.c | 1
-rw-r--r-- drivers/acpi/acpica/nsxfobj.c | 1
-rw-r--r-- drivers/acpi/acpica/rsxface.c | 1
-rw-r--r-- drivers/acpi/acpica/tbinstal.c | 31
-rw-r--r-- drivers/acpi/acpica/tbutils.c | 21
-rw-r--r-- drivers/acpi/acpica/tbxface.c | 1
-rw-r--r-- drivers/acpi/acpica/tbxfroot.c | 7
-rw-r--r-- drivers/acpi/acpica/utdebug.c | 1
-rw-r--r-- drivers/acpi/acpica/utdecode.c | 1
-rw-r--r-- drivers/acpi/acpica/utglobal.c | 1
-rw-r--r-- drivers/acpi/acpica/utxface.c | 11
-rw-r--r-- drivers/acpi/acpica/utxferror.c | 1
-rw-r--r-- drivers/acpi/apei/Kconfig | 11
-rw-r--r-- drivers/acpi/apei/apei-base.c | 35
-rw-r--r-- drivers/acpi/apei/apei-internal.h | 15
-rw-r--r-- drivers/acpi/apei/einj.c | 43
-rw-r--r-- drivers/acpi/apei/erst-dbg.c | 6
-rw-r--r-- drivers/acpi/apei/erst.c | 67
-rw-r--r-- drivers/acpi/apei/ghes.c | 451
-rw-r--r-- drivers/acpi/apei/hest.c | 17
-rw-r--r-- drivers/amba/bus.c | 68
-rw-r--r-- drivers/ata/Kconfig | 13
-rw-r--r-- drivers/ata/Makefile | 2
-rw-r--r-- drivers/ata/acard-ahci.c | 27
-rw-r--r-- drivers/ata/ahci.c | 333
-rw-r--r-- drivers/ata/ahci.h | 2
-rw-r--r-- drivers/ata/ahci_platform.c | 56
-rw-r--r-- drivers/ata/ata_generic.c | 5
-rw-r--r-- drivers/ata/ata_piix.c | 231
-rw-r--r-- drivers/ata/libahci.c | 174
-rw-r--r-- drivers/ata/libata-acpi.c | 70
-rw-r--r-- drivers/ata/libata-core.c | 507
-rw-r--r-- drivers/ata/libata-eh.c | 179
-rw-r--r-- drivers/ata/libata-pmp.c | 168
-rw-r--r-- drivers/ata/libata-scsi.c | 86
-rw-r--r-- drivers/ata/libata-sff.c | 205
-rw-r--r-- drivers/ata/libata-transport.c | 16
-rw-r--r-- drivers/ata/pata_acpi.c | 8
-rw-r--r-- drivers/ata/pata_ali.c | 10
-rw-r--r-- drivers/ata/pata_amd.c | 8
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 6
-rw-r--r-- drivers/ata/pata_artop.c | 112
-rw-r--r-- drivers/ata/pata_at91.c | 7
-rw-r--r-- drivers/ata/pata_atiixp.c | 35
-rw-r--r-- drivers/ata/pata_atp867x.c | 13
-rw-r--r-- drivers/ata/pata_bf54x.c | 4
-rw-r--r-- drivers/ata/pata_cmd64x.c | 44
-rw-r--r-- drivers/ata/pata_cs5520.c | 3
-rw-r--r-- drivers/ata/pata_cs5535.c | 16
-rw-r--r-- drivers/ata/pata_efar.c | 27
-rw-r--r-- drivers/ata/pata_hpt366.c | 35
-rw-r--r-- drivers/ata/pata_hpt3x3.c | 4
-rw-r--r-- drivers/ata/pata_icside.c | 4
-rw-r--r-- drivers/ata/pata_it8213.c | 29
-rw-r--r-- drivers/ata/pata_it821x.c | 16
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 4
-rw-r--r-- drivers/ata/pata_legacy.c | 131
-rw-r--r-- drivers/ata/pata_macio.c | 10
-rw-r--r-- drivers/ata/pata_mpc52xx.c | 2
-rw-r--r-- drivers/ata/pata_mpiix.c | 4
-rw-r--r-- drivers/ata/pata_netcell.c | 5
-rw-r--r-- drivers/ata/pata_ns87410.c | 2
-rw-r--r-- drivers/ata/pata_ns87415.c | 5
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 8
-rw-r--r-- drivers/ata/pata_of_platform.c | 8
-rw-r--r-- drivers/ata/pata_oldpiix.c | 5
-rw-r--r-- drivers/ata/pata_opti.c | 4
-rw-r--r-- drivers/ata/pata_optidma.c | 4
-rw-r--r-- drivers/ata/pata_pcmcia.c | 5
-rw-r--r-- drivers/ata/pata_pdc2027x.c | 36
-rw-r--r-- drivers/ata/pata_platform.c | 2
-rw-r--r-- drivers/ata/pata_qdi.c | 366
-rw-r--r-- drivers/ata/pata_radisys.c | 5
-rw-r--r-- drivers/ata/pata_rdc.c | 21
-rw-r--r-- drivers/ata/pata_rz1000.c | 4
-rw-r--r-- drivers/ata/pata_samsung_cf.c | 2
-rw-r--r-- drivers/ata/pata_sc1200.c | 14
-rw-r--r-- drivers/ata/pata_scc.c | 35
-rw-r--r-- drivers/ata/pata_sch.c | 5
-rw-r--r-- drivers/ata/pata_serverworks.c | 140
-rw-r--r-- drivers/ata/pata_sil680.c | 98
-rw-r--r-- drivers/ata/pata_sis.c | 159
-rw-r--r-- drivers/ata/pata_sl82c105.c | 43
-rw-r--r-- drivers/ata/pata_triflex.c | 4
-rw-r--r-- drivers/ata/pata_via.c | 59
-rw-r--r-- drivers/ata/pdc_adma.c | 10
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 64
-rw-r--r-- drivers/ata/sata_fsl.c | 45
-rw-r--r-- drivers/ata/sata_inic162x.c | 31
-rw-r--r-- drivers/ata/sata_mv.c | 110
-rw-r--r-- drivers/ata/sata_nv.c | 79
-rw-r--r-- drivers/ata/sata_promise.c | 6
-rw-r--r-- drivers/ata/sata_qstor.c | 13
-rw-r--r-- drivers/ata/sata_sil.c | 24
-rw-r--r-- drivers/ata/sata_sil24.c | 49
-rw-r--r-- drivers/ata/sata_sis.c | 28
-rw-r--r-- drivers/ata/sata_svw.c | 4
-rw-r--r-- drivers/ata/sata_sx4.c | 4
-rw-r--r-- drivers/ata/sata_uli.c | 4
-rw-r--r-- drivers/ata/sata_via.c | 42
-rw-r--r-- drivers/ata/sata_vsc.c | 9
-rw-r--r-- drivers/atm/ambassador.c | 15
-rw-r--r-- drivers/atm/ambassador.h | 4
-rw-r--r-- drivers/atm/atmtcp.c | 2
-rw-r--r-- drivers/atm/eni.c | 7
-rw-r--r-- drivers/atm/eni.h | 3
-rw-r--r-- drivers/atm/firestream.c | 3
-rw-r--r-- drivers/atm/fore200e.c | 2
-rw-r--r-- drivers/atm/horizon.c | 3
-rw-r--r-- drivers/atm/idt77252.c | 18
-rw-r--r-- drivers/atm/iphase.c | 268
-rw-r--r-- drivers/atm/iphase.h | 395
-rw-r--r-- drivers/atm/lanai.c | 12
-rw-r--r-- drivers/atm/nicstar.c | 2
-rw-r--r-- drivers/atm/suni.c | 2
-rw-r--r-- drivers/atm/uPD98402.c | 2
-rw-r--r-- drivers/atm/zatm.c | 3
-rw-r--r-- drivers/auxdisplay/ks0108.c | 1
-rw-r--r-- drivers/bcma/Kconfig | 24
-rw-r--r-- drivers/bcma/Makefile | 5
-rw-r--r-- drivers/bcma/bcma_private.h | 30
-rw-r--r-- drivers/bcma/core.c | 78
-rw-r--r-- drivers/bcma/driver_chipcommon.c | 70
-rw-r--r-- drivers/bcma/driver_chipcommon_pmu.c | 200
-rw-r--r-- drivers/bcma/driver_pci.c | 79
-rw-r--r-- drivers/bcma/host_pci.c | 94
-rw-r--r-- drivers/bcma/main.c | 113
-rw-r--r-- drivers/bcma/scan.c | 348
-rw-r--r-- drivers/cdrom/cdrom.c | 8
-rw-r--r-- drivers/char/agp/backend.c | 3
-rw-r--r-- drivers/char/agp/hp-agp.c | 6
-rw-r--r-- drivers/char/agp/intel-agp.h | 1
-rw-r--r-- drivers/char/agp/intel-gtt.c | 35
-rw-r--r-- drivers/char/hw_random/Kconfig | 40
-rw-r--r-- drivers/char/hw_random/Makefile | 2
-rw-r--r-- drivers/char/hw_random/core.c | 2
-rw-r--r-- drivers/char/hw_random/n2-drv.c | 29
-rw-r--r-- drivers/char/hw_random/n2rng.h | 2
-rw-r--r-- drivers/char/hw_random/nomadik-rng.c | 3
-rw-r--r-- drivers/char/hw_random/omap-rng.c | 6
-rw-r--r-- drivers/char/hw_random/timeriomem-rng.c | 3
-rw-r--r-- drivers/char/hw_random/virtio-rng.c | 1
-rw-r--r-- drivers/char/ipmi/ipmi_bt_sm.c | 2
-rw-r--r-- drivers/char/ipmi/ipmi_watchdog.c | 76
-rw-r--r-- drivers/char/tpm/Kconfig | 2
-rw-r--r-- drivers/char/tpm/tpm.c | 154
-rw-r--r-- drivers/char/tpm/tpm.h | 10
-rw-r--r-- drivers/char/tpm/tpm_nsc.c | 16
-rw-r--r-- drivers/char/tpm/tpm_tis.c | 213
-rw-r--r-- drivers/char/xilinx_hwicap/xilinx_hwicap.c | 2
-rw-r--r-- drivers/clk/Kconfig | 3
-rw-r--r-- drivers/clocksource/Kconfig | 27
-rw-r--r-- drivers/clocksource/Makefile | 4
-rw-r--r-- drivers/clocksource/i8253.c | 114
-rw-r--r-- drivers/clocksource/sh_cmt.c | 35
-rw-r--r-- drivers/clocksource/sh_mtu2.c | 1
-rw-r--r-- drivers/clocksource/sh_tmu.c | 1
-rw-r--r-- drivers/connector/cn_proc.c | 147
-rw-r--r-- drivers/connector/connector.c | 7
-rw-r--r-- drivers/crypto/caam/caamalg.c | 1832
-rw-r--r-- drivers/crypto/caam/compat.h | 1
-rw-r--r-- drivers/crypto/caam/ctrl.c | 30
-rw-r--r-- drivers/crypto/caam/desc_constr.h | 58
-rw-r--r-- drivers/crypto/caam/error.c | 10
-rw-r--r-- drivers/dca/dca-core.c | 81
-rw-r--r-- drivers/dca/dca-sysfs.c | 1
-rw-r--r-- drivers/dma/Kconfig | 15
-rw-r--r-- drivers/dma/Makefile | 1
-rw-r--r-- drivers/dma/TODO | 1
-rw-r--r-- drivers/dma/amba-pl08x.c | 857
-rw-r--r-- drivers/dma/at_hdmac.c | 168
-rw-r--r-- drivers/dma/at_hdmac_regs.h | 24
-rw-r--r-- drivers/dma/coh901318.c | 20
-rw-r--r-- drivers/dma/dmaengine.c | 13
-rw-r--r-- drivers/dma/dmatest.c | 24
-rw-r--r-- drivers/dma/dw_dmac.c | 5
-rw-r--r-- drivers/dma/imx-dma.c | 5
-rw-r--r-- drivers/dma/imx-sdma.c | 141
-rw-r--r-- drivers/dma/intel_mid_dma.c | 12
-rw-r--r-- drivers/dma/ioat/dma.c | 62
-rw-r--r-- drivers/dma/ioat/dma.h | 7
-rw-r--r-- drivers/dma/ioat/dma_v2.c | 19
-rw-r--r-- drivers/dma/ioat/dma_v3.c | 19
-rw-r--r-- drivers/dma/ioat/pci.c | 11
-rw-r--r-- drivers/dma/ipu/ipu_idmac.c | 73
-rw-r--r-- drivers/dma/ipu/ipu_irq.c | 48
-rw-r--r-- drivers/dma/mpc512x_dma.c | 1
-rw-r--r-- drivers/dma/mv_xor.c | 84
-rw-r--r-- drivers/dma/mv_xor.h | 1
-rw-r--r-- drivers/dma/mxs-dma.c | 58
-rw-r--r-- drivers/dma/pch_dma.c | 5
-rw-r--r-- drivers/dma/pl330.c | 298
-rw-r--r-- drivers/dma/shdma.c | 211
-rw-r--r-- drivers/dma/shdma.h | 11
-rw-r--r-- drivers/dma/ste_dma40.c | 318
-rw-r--r-- drivers/dma/ste_dma40_ll.h | 3
-rw-r--r-- drivers/dma/timb_dma.c | 5
-rw-r--r-- drivers/eisa/pci_eisa.c | 39
-rw-r--r-- drivers/firewire/core-card.c | 2
-rw-r--r-- drivers/firewire/core-cdev.c | 3
-rw-r--r-- drivers/firewire/core-device.c | 2
-rw-r--r-- drivers/firewire/core-iso.c | 1
-rw-r--r-- drivers/firewire/core-topology.c | 2
-rw-r--r-- drivers/firewire/core-transaction.c | 4
-rw-r--r-- drivers/firewire/core.h | 2
-rw-r--r-- drivers/firewire/net.c | 26
-rw-r--r-- drivers/firewire/nosy.c | 2
-rw-r--r-- drivers/firewire/ohci.c | 306
-rw-r--r-- drivers/firewire/sbp2.c | 260
229 files changed, 8460 insertions(+), 4530 deletions(-)
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 73863d8..76dc02f 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -126,6 +126,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_truncate_io_addresses, FALSE);
+/*
+ * Disable runtime checking and repair of values returned by control methods.
+ * Use only if the repair is causing a problem on a particular machine.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
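Editor's sketch (not part of the patch): the new global is consumed by the nspredef.c hunk further down, so a host that suspects the repair logic of misbehaving on a particular machine could simply flip it before evaluating any control methods:

    /* Illustrative only: opt out of predefined-name return value repair */
    acpi_gbl_disable_auto_repair = TRUE;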
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index b7491ee..6454fe6 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -59,19 +59,15 @@
#define ACPI_SET64(ptr) *ACPI_CAST_PTR (u64, ptr)
/*
- * printf() format helpers
+ * printf() format helper. This macro is a workaround for the difficulties
+ * with emitting 64-bit integers and 64-bit pointers with the same code
+ * for both 32-bit and 64-bit hosts.
*/
/* Split 64-bit integer into two 32-bit values. Use with %8.8X%8.8X */
#define ACPI_FORMAT_UINT64(i) ACPI_HIDWORD(i), ACPI_LODWORD(i)
-#if ACPI_MACHINE_WIDTH == 64
-#define ACPI_FORMAT_NATIVE_UINT(i) ACPI_FORMAT_UINT64(i)
-#else
-#define ACPI_FORMAT_NATIVE_UINT(i) 0, (i)
-#endif
-
/*
* Macros for moving data around to/from buffers that are possibly unaligned.
* If the hardware supports the transfer of unaligned data, just do the store.
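A brief illustration of the surviving macro (editor's sketch, not part of the patch); ACPI_HIDWORD() and ACPI_LODWORD() are ACPICA's existing hi/lo split helpers:

    u64 addr = 0x00000000FED40000ULL;   /* hypothetical region address */
    acpi_os_printf("Addr %8.8X%8.8X\n", ACPI_FORMAT_UINT64(addr));
    /* expands to: acpi_os_printf("Addr %8.8X%8.8X\n",
                                  ACPI_HIDWORD(addr), ACPI_LODWORD(addr)); */

Because both halves are emitted as 32-bit values, the same format string behaves identically on 32-bit and 64-bit hosts, which is what lets the rest of this series drop ACPI_FORMAT_NATIVE_UINT and the %p pointer casts.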
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 94e73c9..c445cca 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -468,6 +468,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
{{"_TC2", 0, ACPI_RTYPE_INTEGER}},
+ {{"_TDL", 0, ACPI_RTYPE_INTEGER}},
{{"_TIP", 1, ACPI_RTYPE_INTEGER}},
{{"_TIV", 1, ACPI_RTYPE_INTEGER}},
{{"_TMP", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index c627a28..9d19587 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -446,7 +446,7 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
/* Now the address and length are valid for this opregion */
@@ -545,7 +545,7 @@ acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
obj_desc,
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->region.address),
obj_desc->region.length));
/* Now the address and length are valid for this opregion */
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index f0edf5c..cb18ab4 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -450,8 +450,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
"Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
&region_obj->region.handler->address_space, handler,
- ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
- region_offset),
+ ACPI_FORMAT_UINT64(region_obj->region.address +
+ region_offset),
acpi_ut_get_region_name(region_obj->region.
space_id)));
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index e114140..f4f523b 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index c57b5c7..20516e5 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "actables.h"
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 52aaff3..f06a3ee 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index 00cd956..aee887e 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 61b8c0e..c572fa0 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -613,8 +613,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
acpi_os_printf("\n");
} else {
acpi_os_printf(" base %8.8X%8.8X Length %X\n",
- ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
- address),
+ ACPI_FORMAT_UINT64(obj_desc->region.
+ address),
obj_desc->region.length);
}
break;
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index b334f54..be22142 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -257,17 +257,15 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
}
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
- " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
+ " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id,
obj_desc->common_field.access_byte_width,
obj_desc->common_field.base_byte_offset,
- field_datum_byte_offset, ACPI_CAST_PTR(void,
- (rgn_desc->
- region.
- address +
- region_offset))));
+ field_datum_byte_offset,
+ ACPI_FORMAT_UINT64(rgn_desc->region.address +
+ region_offset)));
/* Invoke the appropriate address_space/op_region handler */
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f0d5e14..dd5e930 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -174,7 +174,7 @@ acpi_ex_system_memory_space_handler(u32 function,
if (!mem_info->mapped_logical_address) {
ACPI_ERROR((AE_INFO,
"Could not map memory at 0x%8.8X%8.8X, size %u",
- ACPI_FORMAT_NATIVE_UINT(address),
+ ACPI_FORMAT_UINT64(address),
(u32) map_length));
mem_info->mapped_length = 0;
return_ACPI_STATUS(AE_NO_MEMORY);
@@ -195,8 +195,7 @@ acpi_ex_system_memory_space_handler(u32 function,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-Memory (width %u) R/W %u Address=%8.8X%8.8X\n",
- bit_width, function,
- ACPI_FORMAT_NATIVE_UINT(address)));
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/*
* Perform the memory read or write
@@ -298,8 +297,7 @@ acpi_ex_system_io_space_handler(u32 function,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"System-IO (width %u) R/W %u Address=%8.8X%8.8X\n",
- bit_width, function,
- ACPI_FORMAT_NATIVE_UINT(address)));
+ bit_width, function, ACPI_FORMAT_UINT64(address)));
/* Decode the function parameter */
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 55accb7..cc70f3f 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,16 +269,17 @@ acpi_status acpi_hw_clear_acpi_status(void)
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
ACPI_BITMASK_ALL_FIXED_STATUS);
- if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
- }
+
+ acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+
+ if (ACPI_FAILURE(status))
+ goto exit;
/* Clear the GPE Bits in all GPE registers in all GPE blocks */
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
- unlock_and_exit:
- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+exit:
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 2ac28bb..d52da30 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -46,6 +46,7 @@
#include "accommon.h"
#include "actables.h"
#include <linux/tboot.h>
+#include <linux/module.h>
#define _COMPONENT ACPI_HARDWARE
ACPI_MODULE_NAME("hwsleep")
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 9c8eb71..50d21c4 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 5f16058..d43b8af 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -141,17 +141,17 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
byte_width = ACPI_DIV_8(bit_width);
last_address = address + byte_width - 1;
- ACPI_DEBUG_PRINT((ACPI_DB_IO, "Address %p LastAddress %p Length %X",
- ACPI_CAST_PTR(void, address), ACPI_CAST_PTR(void,
- last_address),
- byte_width));
+ ACPI_DEBUG_PRINT((ACPI_DB_IO,
+ "Address %8.8X%8.8X LastAddress %8.8X%8.8X Length %X",
+ ACPI_FORMAT_UINT64(address),
+ ACPI_FORMAT_UINT64(last_address), byte_width));
/* Maximum 16-bit address in I/O space */
if (last_address > ACPI_UINT16_MAX) {
ACPI_ERROR((AE_INFO,
- "Illegal I/O port address/length above 64K: %p/0x%X",
- ACPI_CAST_PTR(void, address), byte_width));
+ "Illegal I/O port address/length above 64K: %8.8X%8.8X/0x%X",
+ ACPI_FORMAT_UINT64(address), byte_width));
return_ACPI_STATUS(AE_LIMIT);
}
@@ -180,8 +180,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
if (acpi_gbl_osi_data >= port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_IO,
- "Denied AML access to port 0x%p/%X (%s 0x%.4X-0x%.4X)",
- ACPI_CAST_PTR(void, address),
+ "Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)",
+ ACPI_FORMAT_UINT64(address),
byte_width, port_info->name,
port_info->start,
port_info->end));
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index f75f81a..c2793a8 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b683cc2..0a68c73 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -251,12 +251,11 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
switch (type) {
case ACPI_TYPE_PROCESSOR:
- acpi_os_printf("ID %X Len %.4X Addr %p\n",
+ acpi_os_printf("ID %02X Len %02X Addr %8.8X%8.8X\n",
obj_desc->processor.proc_id,
obj_desc->processor.length,
- ACPI_CAST_PTR(void,
- obj_desc->processor.
- address));
+ ACPI_FORMAT_UINT64(obj_desc->processor.
+ address));
break;
case ACPI_TYPE_DEVICE:
@@ -327,8 +326,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
space_id));
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
- ACPI_FORMAT_NATIVE_UINT
- (obj_desc->region.address),
+ ACPI_FORMAT_UINT64(obj_desc->
+ region.
+ address),
obj_desc->region.length);
} else {
acpi_os_printf
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index dc00582..c845c80 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -193,14 +193,20 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
}
/*
- * 1) We have a return value, but if one wasn't expected, just exit, this is
- * not a problem. For example, if the "Implicit Return" feature is
- * enabled, methods will always return a value.
+ * Return value validation and possible repair.
*
- * 2) If the return value can be of any type, then we cannot perform any
- * validation, exit.
+ * 1) Don't perform return value validation/repair if this feature
+ * has been disabled via a global option.
+ *
+ * 2) We have a return value, but if one wasn't expected, just exit,
+ * this is not a problem. For example, if the "Implicit Return"
+ * feature is enabled, methods will always return a value.
+ *
+ * 3) If the return value can be of any type, then we cannot perform
+ * any validation, just exit.
*/
- if ((!predefined->info.expected_btypes) ||
+ if (acpi_gbl_disable_auto_repair ||
+ (!predefined->info.expected_btypes) ||
(predefined->info.expected_btypes == ACPI_RTYPE_ALL)) {
goto cleanup;
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index c53f004..e7f016d 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 3fd4526..83bf930 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index db7660f..57e6d82 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 2ff657a..fe86b37 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acresrc.h"
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 48db094..08a3380 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -126,12 +126,29 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
}
/*
- * Originally, we checked the table signature for "SSDT" or "PSDT" here.
- * Next, we added support for OEMx tables, signature "OEM".
- * Valid tables were encountered with a null signature, so we've just
- * given up on validating the signature, since it seems to be a waste
- * of code. The original code was removed (05/2008).
+ * Validate the incoming table signature.
+ *
+ * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
+ * 2) We added support for OEMx tables, signature "OEM".
+ * 3) Valid tables were encountered with a null signature, so we just
+ * gave up on validating the signature, (05/2008).
+ * 4) We encountered non-AML tables such as the MADT, which caused
+ * interpreter errors and kernel faults. So now, we once again allow
+ * only "SSDT", "OEMx", and also a null signature (05/2011).
*/
+ if ((table_desc->pointer->signature[0] != 0x00) &&
+ (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
+ && (ACPI_STRNCMP(table_desc->pointer->signature, "OEM", 3))) {
+ ACPI_ERROR((AE_INFO,
+ "Table has invalid signature [%4.4s] (0x%8.8X), must be SSDT or OEMx",
+ acpi_ut_valid_acpi_name(*(u32 *)table_desc->
+ pointer->
+ signature) ? table_desc->
+ pointer->signature : "????",
+ *(u32 *)table_desc->pointer->signature));
+
+ return_ACPI_STATUS(AE_BAD_SIGNATURE);
+ }
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
@@ -211,9 +228,9 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
status = acpi_os_table_override(table_desc->pointer, &override_table);
if (ACPI_SUCCESS(status) && override_table) {
ACPI_INFO((AE_INFO,
- "%4.4s @ 0x%p Table override, replaced with:",
+ "%4.4s @ 0x%8.8X%8.8X Table override, replaced with:",
table_desc->pointer->signature,
- ACPI_CAST_PTR(void, table_desc->address)));
+ ACPI_FORMAT_UINT64(table_desc->address)));
/* We can delete the table that was passed as a parameter */
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0f2d395..24de78f 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -237,16 +237,12 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
- /*
- * The reason that the Address is cast to a void pointer is so that we
- * can use %p which will work properly on both 32-bit and 64-bit hosts.
- */
if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
- ACPI_INFO((AE_INFO, "%4.4s %p %05X",
- header->signature, ACPI_CAST_PTR(void, address),
+ ACPI_INFO((AE_INFO, "%4.4s 0x%8.8X%8.8X %05X",
+ header->signature, ACPI_FORMAT_UINT64(address),
header->length));
} else if (ACPI_COMPARE_NAME(header->signature, ACPI_SIG_RSDP)) {
@@ -257,8 +253,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
header)->oem_id, ACPI_OEM_ID_SIZE);
acpi_tb_fix_string(local_header.oem_id, ACPI_OEM_ID_SIZE);
- ACPI_INFO((AE_INFO, "RSDP %p %05X (v%.2d %6.6s)",
- ACPI_CAST_PTR (void, address),
+ ACPI_INFO((AE_INFO, "RSDP 0x%8.8X%8.8X %05X (v%.2d %6.6s)",
+ ACPI_FORMAT_UINT64(address),
(ACPI_CAST_PTR(struct acpi_table_rsdp, header)->
revision >
0) ? ACPI_CAST_PTR(struct acpi_table_rsdp,
@@ -272,8 +268,8 @@ acpi_tb_print_table_header(acpi_physical_address address,
acpi_tb_cleanup_table_header(&local_header, header);
ACPI_INFO((AE_INFO,
- "%4.4s %p %05X (v%.2d %6.6s %8.8s %08X %4.4s %08X)",
- local_header.signature, ACPI_CAST_PTR(void, address),
+ "%-4.4s 0x%8.8X%8.8X %05X (v%.2d %-6.6s %-8.8s %08X %-4.4s %08X)",
+ local_header.signature, ACPI_FORMAT_UINT64(address),
local_header.length, local_header.revision,
local_header.oem_id, local_header.oem_table_id,
local_header.oem_revision,
@@ -488,9 +484,8 @@ acpi_tb_install_table(acpi_physical_address address,
status = acpi_os_table_override(mapped_table, &override_table);
if (ACPI_SUCCESS(status) && override_table) {
ACPI_INFO((AE_INFO,
- "%4.4s @ 0x%p Table override, replaced with:",
- mapped_table->signature, ACPI_CAST_PTR(void,
- address)));
+ "%4.4s @ 0x%8.8X%8.8X Table override, replaced with:",
+ mapped_table->signature, ACPI_FORMAT_UINT64(address)));
acpi_gbl_root_table_list.tables[table_index].pointer =
override_table;
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 55edd4a..d05f2fe 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -42,6 +42,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6c..0272fbe 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -119,7 +119,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
*
******************************************************************************/
-acpi_status acpi_find_root_pointer(acpi_size *table_address)
+acpi_status acpi_find_root_pointer(acpi_physical_address * table_address)
{
u8 *table_ptr;
u8 *mem_rover;
@@ -177,7 +177,8 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
physical_address +=
(u32) ACPI_PTR_DIFF(mem_rover, table_ptr);
- *table_address = physical_address;
+ *table_address =
+ (acpi_physical_address) physical_address;
return_ACPI_STATUS(AE_OK);
}
}
@@ -210,7 +211,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
(ACPI_HI_RSDP_WINDOW_BASE +
ACPI_PTR_DIFF(mem_rover, table_ptr));
- *table_address = physical_address;
+ *table_address = (acpi_physical_address) physical_address;
return_ACPI_STATUS(AE_OK);
}
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a9bcd81..a1f8d75 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 97cb36f..8b087e2 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 833a38a..ffba0a3 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -43,6 +43,7 @@
#define DEFINE_ACPI_GLOBALS
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 98ad125..ecc428e 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
@@ -162,10 +163,12 @@ acpi_status acpi_enable_subsystem(u32 flags)
* Obtain a permanent mapping for the FACS. This is required for the
* Global Lock and the Firmware Waking Vector
*/
- status = acpi_tb_initialize_facs();
- if (ACPI_FAILURE(status)) {
- ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
- return_ACPI_STATUS(status);
+ if (!(flags & ACPI_NO_FACS_INIT)) {
+ status = acpi_tb_initialize_facs();
+ if (ACPI_FAILURE(status)) {
+ ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+ return_ACPI_STATUS(status);
+ }
}
/*
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 916ae09..8d0245e 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -41,6 +41,7 @@
* POSSIBILITY OF SUCH DAMAGES.
*/
+#include <linux/export.h>
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index f739a70..f0c1ce9 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -10,9 +10,11 @@ config ACPI_APEI
error injection.
config ACPI_APEI_GHES
- tristate "APEI Generic Hardware Error Source"
+ bool "APEI Generic Hardware Error Source"
depends on ACPI_APEI && X86
select ACPI_HED
+ select IRQ_WORK
+ select GENERIC_ALLOCATOR
help
Generic Hardware Error Source provides a way to report
platform hardware errors (such as that from chipset). It
@@ -30,6 +32,13 @@ config ACPI_APEI_PCIEAER
PCIe AER errors may be reported via APEI firmware first mode.
Turn on this option to enable the corresponding support.
+config ACPI_APEI_MEMORY_FAILURE
+ bool "APEI memory error recovering support"
+ depends on ACPI_APEI && MEMORY_FAILURE
+ help
+ Memory errors may be reported via APEI firmware first mode.
+ Turn on this option to enable memory error recovery support.
+
config ACPI_APEI_EINJ
tristate "APEI Error INJection (EINJ)"
depends on ACPI_APEI && DEBUG_FS
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 4a904a4..6154036 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -157,9 +157,10 @@ EXPORT_SYMBOL_GPL(apei_exec_noop);
* Interpret the specified action. Go through whole action table,
* execute all instructions belong to the action.
*/
-int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action,
+ bool optional)
{
- int rc;
+ int rc = -ENOENT;
u32 i, ip;
struct acpi_whea_header *entry;
apei_exec_ins_func_t run;
@@ -198,9 +199,9 @@ rewind:
goto rewind;
}
- return 0;
+ return !optional && rc < 0 ? rc : 0;
}
-EXPORT_SYMBOL_GPL(apei_exec_run);
+EXPORT_SYMBOL_GPL(__apei_exec_run);
typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
@@ -603,3 +604,29 @@ struct dentry *apei_get_debugfs_dir(void)
return dapei;
}
EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
+
+int apei_osc_setup(void)
+{
+ static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
+ acpi_handle handle;
+ u32 capbuf[3];
+ struct acpi_osc_context context = {
+ .uuid_str = whea_uuid_str,
+ .rev = 1,
+ .cap.length = sizeof(capbuf),
+ .cap.pointer = capbuf,
+ };
+
+ capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
+ capbuf[OSC_SUPPORT_TYPE] = 1;
+ capbuf[OSC_CONTROL_TYPE] = 0;
+
+ if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))
+ || ACPI_FAILURE(acpi_run_osc(handle, &context)))
+ return -EIO;
+ else {
+ kfree(context.ret.pointer);
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(apei_osc_setup);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index ef0581f..f57050e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -50,7 +50,18 @@ static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
return ctx->value;
}
-int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
+
+static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 0);
+}
+
+/* It is optional whether the firmware provides the action */
+static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action)
+{
+ return __apei_exec_run(ctx, action, 1);
+}
/* Common instruction implementation */
@@ -113,4 +124,6 @@ void apei_estatus_print(const char *pfx,
const struct acpi_hest_generic_status *estatus);
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
+
+int apei_osc_setup(void);
#endif
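A sketch of the calling convention this split creates (editor's illustration; it mirrors the einj.c and erst.c conversions below). __apei_exec_run() starts with rc = -ENOENT, so a missing action-table entry fails apei_exec_run() but is silently tolerated by apei_exec_run_optional():

    rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
    if (rc)         /* a real execution failure, not a missing action */
        return rc;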
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index f74b2ea..589b96c 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -46,7 +46,8 @@
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
- * for error injection.
+ * for error injection. This extension is used only if the
+ * param_extension module parameter is specified.
*/
struct einj_parameter {
u64 type;
@@ -65,6 +66,9 @@ struct einj_parameter {
((struct acpi_whea_header *)((char *)(tab) + \
sizeof(struct acpi_table_einj)))
+static bool param_extension;
+module_param(param_extension, bool, 0);
+
static struct acpi_table_einj *einj_tab;
static struct apei_resources einj_resources;
@@ -285,7 +289,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
einj_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
@@ -323,7 +327,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
rc = __einj_error_trigger(trigger_paddr);
if (rc)
return rc;
- rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+ rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
return rc;
}
@@ -489,14 +493,6 @@ static int __init einj_init(void)
einj_debug_dir, NULL, &error_type_fops);
if (!fentry)
goto err_cleanup;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_cleanup;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_cleanup;
fentry = debugfs_create_file("error_inject", S_IWUSR,
einj_debug_dir, NULL, &error_inject_fops);
if (!fentry)
@@ -513,12 +509,23 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
+ if (param_extension) {
+ param_paddr = einj_get_parameter_address();
+ if (param_paddr) {
+ einj_param = ioremap(param_paddr, sizeof(*einj_param));
+ rc = -ENOMEM;
+ if (!einj_param)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ } else
+ pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -526,6 +533,8 @@ static int __init einj_init(void)
return 0;
err_unmap:
+ if (einj_param)
+ iounmap(einj_param);
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
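Usage note (editor's illustration, not part of the patch): with the new module parameter left at its default, the unpublished extension is never touched and the param1/param2 debugfs files are not created.

    /*
     *   modprobe einj                    - extension ignored
     *   modprobe einj param_extension=1  - param1/param2 exposed in
     *                                      debugfs, provided the BIOS
     *                                      reports a parameter address
     */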
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index a4cfb64..903549d 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -33,7 +33,7 @@
#define ERST_DBG_PFX "ERST DBG: "
-#define ERST_DBG_RECORD_LEN_MAX 4096
+#define ERST_DBG_RECORD_LEN_MAX 0x4000
static void *erst_dbg_buf;
static unsigned int erst_dbg_buf_len;
@@ -213,6 +213,10 @@ static struct miscdevice erst_dbg_dev = {
static __init int erst_dbg_init(void)
{
+ if (erst_disable) {
+ pr_info(ERST_DBG_PFX "ERST support is disabled.\n");
+ return -ENODEV;
+ }
return misc_register(&erst_dbg_dev);
}
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index e6cef8e..631b947 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -642,7 +642,7 @@ static int __erst_write_to_storage(u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -666,7 +666,7 @@ static int __erst_write_to_storage(u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -681,7 +681,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, offset);
@@ -709,7 +709,7 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -724,7 +724,7 @@ static int __erst_clear_from_storage(u64 record_id)
int rc;
erst_exec_ctx_init(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, record_id);
@@ -748,7 +748,7 @@ static int __erst_clear_from_storage(u64 record_id)
if (rc)
return rc;
val = apei_exec_ctx_get_output(&ctx);
- rc = apei_exec_run(&ctx, ACPI_ERST_END);
+ rc = apei_exec_run_optional(&ctx, ACPI_ERST_END);
if (rc)
return rc;
@@ -932,8 +932,12 @@ static int erst_check_table(struct acpi_table_erst *erst_tab)
static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time);
-static u64 erst_writer(enum pstore_type_id type, size_t size);
+ struct timespec *time, char **buf,
+ struct pstore_info *psi);
+static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+ size_t size, struct pstore_info *psi);
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi);
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
@@ -942,7 +946,7 @@ static struct pstore_info erst_info = {
.close = erst_close_pstore,
.read = erst_reader,
.write = erst_writer,
- .erase = erst_clear
+ .erase = erst_clearer
};
#define CPER_CREATOR_PSTORE \
@@ -983,17 +987,23 @@ static int erst_close_pstore(struct pstore_info *psi)
}
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
- struct timespec *time)
+ struct timespec *time, char **buf,
+ struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
u64 record_id;
- struct cper_pstore_record *rcd = (struct cper_pstore_record *)
- (erst_info.buf - sizeof(*rcd));
+ struct cper_pstore_record *rcd;
+ size_t rcd_len = sizeof(*rcd) + erst_info.bufsize;
if (erst_disable)
return -ENODEV;
+ rcd = kmalloc(rcd_len, GFP_KERNEL);
+ if (!rcd) {
+ rc = -ENOMEM;
+ goto out;
+ }
skip:
rc = erst_get_record_id_next(&reader_pos, &record_id);
if (rc)
@@ -1001,22 +1011,27 @@ skip:
/* no more record */
if (record_id == APEI_ERST_INVALID_RECORD_ID) {
- rc = -1;
+ rc = -EINVAL;
goto out;
}
- len = erst_read(record_id, &rcd->hdr, sizeof(*rcd) +
- erst_info.bufsize);
+ len = erst_read(record_id, &rcd->hdr, rcd_len);
/* The record may be cleared by others, try read next record */
if (len == -ENOENT)
goto skip;
- else if (len < 0) {
- rc = -1;
+ else if (len < sizeof(*rcd)) {
+ rc = -EIO;
goto out;
}
if (uuid_le_cmp(rcd->hdr.creator_id, CPER_CREATOR_PSTORE) != 0)
goto skip;
+ *buf = kmalloc(len, GFP_KERNEL);
+ if (*buf == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
@@ -1034,13 +1049,16 @@ skip:
time->tv_nsec = 0;
out:
+ kfree(rcd);
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static u64 erst_writer(enum pstore_type_id type, size_t size)
+static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+ size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
(erst_info.buf - sizeof(*rcd));
+ int ret;
memset(rcd, 0, sizeof(*rcd));
memcpy(rcd->hdr.signature, CPER_SIG_RECORD, CPER_SIG_SIZE);
@@ -1075,9 +1093,16 @@ static u64 erst_writer(enum pstore_type_id type, size_t size)
}
rcd->sec_hdr.section_severity = CPER_SEV_FATAL;
- erst_write(&rcd->hdr);
+ ret = erst_write(&rcd->hdr);
+ *id = rcd->hdr.record_id;
- return rcd->hdr.record_id;
+ return ret;
+}
+
+static int erst_clearer(enum pstore_type_id type, u64 id,
+ struct pstore_info *psi)
+{
+ return erst_clear(id);
}
static int __init erst_init(void)
@@ -1155,7 +1180,7 @@ static int __init erst_init(void)
goto err_release_erange;
buf = kmalloc(erst_erange.size, GFP_KERNEL);
- mutex_init(&erst_info.buf_mutex);
+ spin_lock_init(&erst_info.buf_lock);
if (buf) {
erst_info.buf = buf + sizeof(struct cper_pstore_record);
erst_info.bufsize = erst_erange.size -
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index f703b28..b8e08cb 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -12,7 +12,7 @@
* For more information about Generic Hardware Error Source, please
* refer to ACPI Specification version 4.0, section 17.3.2.6
*
- * Copyright 2010 Intel Corp.
+ * Copyright 2010,2011 Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or
@@ -42,17 +42,45 @@
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
+#include <linux/irq_work.h>
+#include <linux/llist.h>
+#include <linux/genalloc.h>
#include <acpi/apei.h>
#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
+#include <asm/nmi.h>
#include "apei-internal.h"
#define GHES_PFX "GHES: "
#define GHES_ESTATUS_MAX_SIZE 65536
+#define GHES_ESOURCE_PREALLOC_MAX_SIZE 65536
+
+#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3
+
+/* This is just an estimate for memory pool allocation */
+#define GHES_ESTATUS_CACHE_AVG_SIZE 512
+
+#define GHES_ESTATUS_CACHES_SIZE 4
+
+#define GHES_ESTATUS_IN_CACHE_MAX_NSEC 10000000000ULL
+/* Prevent too many caches from being allocated because of RCU */
+#define GHES_ESTATUS_CACHE_ALLOCED_MAX (GHES_ESTATUS_CACHES_SIZE * 3 / 2)
+
+#define GHES_ESTATUS_CACHE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_cache) + (estatus_len))
+#define GHES_ESTATUS_FROM_CACHE(estatus_cache) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_cache *)(estatus_cache) + 1))
+
+#define GHES_ESTATUS_NODE_LEN(estatus_len) \
+ (sizeof(struct ghes_estatus_node) + (estatus_len))
+#define GHES_ESTATUS_FROM_NODE(estatus_node) \
+ ((struct acpi_hest_generic_status *) \
+ ((struct ghes_estatus_node *)(estatus_node) + 1))
/*
* One struct ghes is created for each generic hardware error source.
@@ -77,6 +105,22 @@ struct ghes {
};
};
+struct ghes_estatus_node {
+ struct llist_node llnode;
+ struct acpi_hest_generic *generic;
+};
+
+struct ghes_estatus_cache {
+ u32 estatus_len;
+ atomic_t count;
+ struct acpi_hest_generic *generic;
+ unsigned long long time_in;
+ struct rcu_head rcu;
+};
+
+int ghes_disable;
+module_param_named(disable, ghes_disable, bool, 0);
+
static int ghes_panic_timeout __read_mostly = 30;
/*
@@ -121,6 +165,22 @@ static struct vm_struct *ghes_ioremap_area;
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
+/*
+ * printk is not safe in NMI context. So in an NMI handler, we
+ * allocate the required memory from the lock-less memory allocator
+ * (ghes_estatus_pool), save the estatus into it, put it on a
+ * lock-less list (ghes_estatus_llist), and then delay the printk
+ * into IRQ context via irq_work (ghes_proc_irq_work).
+ * ghes_estatus_pool_size_request records the pool size required by
+ * all NMI error sources.
+ */
+static struct gen_pool *ghes_estatus_pool;
+static unsigned long ghes_estatus_pool_size_request;
+static struct llist_head ghes_estatus_llist;
+static struct irq_work ghes_proc_irq_work;
+
+struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
+static atomic_t ghes_estatus_cache_alloced;
+
static int ghes_ioremap_init(void)
{
ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
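A condensed sketch of the deferral pattern the comment above describes (editor's illustration; the real code is in ghes_notify_nmi() and ghes_proc_in_irq() later in this patch):

    /* NMI context: never printk, only allocate, copy, and queue */
    estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
    if (estatus_node) {
        memcpy(GHES_ESTATUS_FROM_NODE(estatus_node), ghes->estatus, len);
        llist_add(&estatus_node->llnode, &ghes_estatus_llist);
    }
    irq_work_queue(&ghes_proc_irq_work);
    /* IRQ context: llist_del_all(), then printk the saved records */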
@@ -180,6 +240,55 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
__flush_tlb_one(vaddr);
}
+static int ghes_estatus_pool_init(void)
+{
+ ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
+ if (!ghes_estatus_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ghes_estatus_pool_free_chunk_page(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk,
+ void *data)
+{
+ free_page(chunk->start_addr);
+}
+
+static void ghes_estatus_pool_exit(void)
+{
+ gen_pool_for_each_chunk(ghes_estatus_pool,
+ ghes_estatus_pool_free_chunk_page, NULL);
+ gen_pool_destroy(ghes_estatus_pool);
+}
+
+static int ghes_estatus_pool_expand(unsigned long len)
+{
+ unsigned long i, pages, size, addr;
+ int ret;
+
+ ghes_estatus_pool_size_request += PAGE_ALIGN(len);
+ size = gen_pool_size(ghes_estatus_pool);
+ if (size >= ghes_estatus_pool_size_request)
+ return 0;
+ pages = (ghes_estatus_pool_size_request - size) / PAGE_SIZE;
+ for (i = 0; i < pages; i++) {
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+ ret = gen_pool_add(ghes_estatus_pool, addr, PAGE_SIZE, -1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ghes_estatus_pool_shrink(unsigned long len)
+{
+ ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
+}
+
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
struct ghes *ghes;
@@ -341,43 +450,196 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
-static void ghes_do_proc(struct ghes *ghes)
+static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
{
- int sev, processed = 0;
+ int sev, sec_sev;
struct acpi_hest_generic_data *gdata;
- sev = ghes_severity(ghes->estatus->error_severity);
- apei_estatus_for_each_section(ghes->estatus, gdata) {
-#ifdef CONFIG_X86_MCE
+ sev = ghes_severity(estatus->error_severity);
+ apei_estatus_for_each_section(estatus, gdata) {
+ sec_sev = ghes_severity(gdata->error_severity);
if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
CPER_SEC_PLATFORM_MEM)) {
- apei_mce_report_mem_error(
- sev == GHES_SEV_CORRECTED,
- (struct cper_sec_mem_err *)(gdata+1));
- processed = 1;
- }
+ struct cper_sec_mem_err *mem_err;
+ mem_err = (struct cper_sec_mem_err *)(gdata+1);
+#ifdef CONFIG_X86_MCE
+ apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
+ mem_err);
#endif
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ unsigned long pfn;
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ memory_failure_queue(pfn, 0, 0);
+ }
+#endif
+ }
}
}
-static void ghes_print_estatus(const char *pfx, struct ghes *ghes)
+static void __ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
{
- /* Not more than 2 messages every 5 seconds */
- static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2);
-
if (pfx == NULL) {
- if (ghes_severity(ghes->estatus->error_severity) <=
+ if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
pfx = KERN_WARNING HW_ERR;
else
pfx = KERN_ERR HW_ERR;
}
- if (__ratelimit(&ratelimit)) {
- printk(
- "%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, ghes->generic->header.source_id);
- apei_estatus_print(pfx, ghes->estatus);
+ printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
+ pfx, generic->header.source_id);
+ apei_estatus_print(pfx, estatus);
+}
+
+static int ghes_print_estatus(const char *pfx,
+ const struct acpi_hest_generic *generic,
+ const struct acpi_hest_generic_status *estatus)
+{
+ /* Not more than 2 messages every 5 seconds */
+ static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
+ static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
+ struct ratelimit_state *ratelimit;
+
+ if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
+ ratelimit = &ratelimit_corrected;
+ else
+ ratelimit = &ratelimit_uncorrected;
+ if (__ratelimit(ratelimit)) {
+ __ghes_print_estatus(pfx, generic, estatus);
+ return 1;
}
+ return 0;
+}
+
+/*
+ * GHES error status reporting throttle, to report more kinds of
+ * errors instead of just the most frequently occurring ones.
+ */
+static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
+{
+ u32 len;
+ int i, cached = 0;
+ unsigned long long now;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ len = apei_estatus_len(estatus);
+ rcu_read_lock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL)
+ continue;
+ if (len != cache->estatus_len)
+ continue;
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ if (memcmp(estatus, cache_estatus, len))
+ continue;
+ atomic_inc(&cache->count);
+ now = sched_clock();
+ if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
+ cached = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return cached;
+}
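In concrete terms (editor's note on the function above):

    /*
     * sched_clock() deltas drive the decision; with
     * GHES_ESTATUS_IN_CACHE_MAX_NSEC = 10 s:
     *   now - cache->time_in <  10 s  -> cached, printk suppressed
     *   now - cache->time_in >= 10 s  -> not cached, reported again
     * so byte-identical repeats are throttled and the ratelimit budget
     * is left for rarer error records.
     */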
+
+static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int alloced;
+ u32 len, cache_len;
+ struct ghes_estatus_cache *cache;
+ struct acpi_hest_generic_status *cache_estatus;
+
+ alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
+ if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ len = apei_estatus_len(estatus);
+ cache_len = GHES_ESTATUS_CACHE_LEN(len);
+ cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
+ if (!cache) {
+ atomic_dec(&ghes_estatus_cache_alloced);
+ return NULL;
+ }
+ cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
+ memcpy(cache_estatus, estatus, len);
+ cache->estatus_len = len;
+ atomic_set(&cache->count, 0);
+ cache->generic = generic;
+ cache->time_in = sched_clock();
+ return cache;
+}
+
+static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
+{
+ u32 len;
+
+ len = apei_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
+ len = GHES_ESTATUS_CACHE_LEN(len);
+ gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
+ atomic_dec(&ghes_estatus_cache_alloced);
+}
+
+static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
+{
+ struct ghes_estatus_cache *cache;
+
+ cache = container_of(head, struct ghes_estatus_cache, rcu);
+ ghes_estatus_cache_free(cache);
+}
+
+static void ghes_estatus_cache_add(
+ struct acpi_hest_generic *generic,
+ struct acpi_hest_generic_status *estatus)
+{
+ int i, slot = -1, count;
+ unsigned long long now, duration, period, max_period = 0;
+ struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
+
+ new_cache = ghes_estatus_cache_alloc(generic, estatus);
+ if (new_cache == NULL)
+ return;
+ rcu_read_lock();
+ now = sched_clock();
+ for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
+ cache = rcu_dereference(ghes_estatus_caches[i]);
+ if (cache == NULL) {
+ slot = i;
+ slot_cache = NULL;
+ break;
+ }
+ duration = now - cache->time_in;
+ if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
+ slot = i;
+ slot_cache = cache;
+ break;
+ }
+ count = atomic_read(&cache->count);
+ period = duration;
+ do_div(period, (count + 1));
+ if (period > max_period) {
+ max_period = period;
+ slot = i;
+ slot_cache = cache;
+ }
+ }
+ /* new_cache must be put into the array after its contents are written */
+ smp_wmb();
+ if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
+ slot_cache, new_cache) == slot_cache) {
+ if (slot_cache)
+ call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
+ } else
+ ghes_estatus_cache_free(new_cache);
+ rcu_read_unlock();
}
static int ghes_proc(struct ghes *ghes)
@@ -387,9 +649,11 @@ static int ghes_proc(struct ghes *ghes)
rc = ghes_read_estatus(ghes, 0);
if (rc)
goto out;
- ghes_print_estatus(NULL, ghes);
- ghes_do_proc(ghes);
-
+ if (!ghes_estatus_cached(ghes->estatus)) {
+ if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
+ ghes_estatus_cache_add(ghes->generic, ghes->estatus);
+ }
+ ghes_do_proc(ghes->estatus);
out:
ghes_clear_estatus(ghes);
return 0;
@@ -447,15 +711,50 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
-static int ghes_notify_nmi(struct notifier_block *this,
- unsigned long cmd, void *data)
+static void ghes_proc_in_irq(struct irq_work *irq_work)
+{
+ struct llist_node *llnode, *next, *tail = NULL;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ /*
+ * Because the estatus entries on the list are in reverse time
+ * order, restore the proper order first.
+ */
+ llnode = llist_del_all(&ghes_estatus_llist);
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+ llnode = tail;
+ while (llnode) {
+ next = llnode->next;
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ ghes_do_proc(estatus);
+ if (!ghes_estatus_cached(estatus)) {
+ generic = estatus_node->generic;
+ if (ghes_print_estatus(NULL, generic, estatus))
+ ghes_estatus_cache_add(generic, estatus);
+ }
+ gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
+ node_len);
+ llnode = next;
+ }
+}
+
+static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
int sev, sev_global = -1;
- int ret = NOTIFY_DONE;
-
- if (cmd != DIE_NMI)
- return ret;
+ int ret = NMI_DONE;
raw_spin_lock(&ghes_nmi_lock);
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
@@ -468,15 +767,16 @@ static int ghes_notify_nmi(struct notifier_block *this,
sev_global = sev;
ghes_global = ghes;
}
- ret = NOTIFY_STOP;
+ ret = NMI_HANDLED;
}
- if (ret == NOTIFY_DONE)
+ if (ret == NMI_DONE)
goto out;
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global);
+ __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
panic_timeout = ghes_panic_timeout;
@@ -484,12 +784,34 @@ static int ghes_notify_nmi(struct notifier_block *this,
}
list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ u32 len, node_len;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic_status *estatus;
+#endif
if (!(ghes->flags & GHES_TO_CLEAR))
continue;
- /* Do not print estatus because printk is not NMI safe */
- ghes_do_proc(ghes);
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ if (ghes_estatus_cached(ghes->estatus))
+ goto next;
+ /* Save estatus for further processing in IRQ context */
+ len = apei_estatus_len(ghes->estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool,
+ node_len);
+ if (estatus_node) {
+ estatus_node->generic = ghes->generic;
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ memcpy(estatus, ghes->estatus, len);
+ llist_add(&estatus_node->llnode, &ghes_estatus_llist);
+ }
+next:
+#endif
ghes_clear_estatus(ghes);
}
+#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
+ irq_work_queue(&ghes_proc_irq_work);
+#endif
out:
raw_spin_unlock(&ghes_nmi_lock);
@@ -500,14 +822,26 @@ static struct notifier_block ghes_notifier_sci = {
.notifier_call = ghes_notify_sci,
};
-static struct notifier_block ghes_notifier_nmi = {
- .notifier_call = ghes_notify_nmi,
-};
+static unsigned long ghes_esource_prealloc_size(
+ const struct acpi_hest_generic *generic)
+{
+ unsigned long block_length, prealloc_records, prealloc_size;
+
+ block_length = min_t(unsigned long, generic->error_block_length,
+ GHES_ESTATUS_MAX_SIZE);
+ prealloc_records = max_t(unsigned long,
+ generic->records_to_preallocate, 1);
+ prealloc_size = min_t(unsigned long, block_length * prealloc_records,
+ GHES_ESOURCE_PREALLOC_MAX_SIZE);
+
+ return prealloc_size;
+}
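
For illustration, the same clamping in stand-alone form; the two limit macros below are placeholders, not the kernel's actual constants. With block_length = 4 KiB and records_to_preallocate = 8, for instance, the pool grows by min(32 KiB, the preallocation cap):

#define ESTATUS_MAX_SIZE	(64 * 1024UL)	/* placeholder value */
#define PREALLOC_MAX_SIZE	(64 * 1024UL)	/* placeholder value */

static unsigned long prealloc_size(unsigned long block_length,
				   unsigned long records)
{
	if (block_length > ESTATUS_MAX_SIZE)
		block_length = ESTATUS_MAX_SIZE;
	if (records < 1)
		records = 1;	/* always reserve room for one record */
	if (block_length * records > PREALLOC_MAX_SIZE)
		return PREALLOC_MAX_SIZE;
	return block_length * records;
}
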
static int __devinit ghes_probe(struct platform_device *ghes_dev)
{
struct acpi_hest_generic *generic;
struct ghes *ghes = NULL;
+ unsigned long len;
int rc = -EINVAL;
generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -573,9 +907,12 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev)
mutex_unlock(&ghes_list_mutex);
break;
case ACPI_HEST_NOTIFY_NMI:
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_expand(len);
mutex_lock(&ghes_list_mutex);
if (list_empty(&ghes_nmi))
- register_die_notifier(&ghes_notifier_nmi);
+ register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
+ "ghes");
list_add_rcu(&ghes->list, &ghes_nmi);
mutex_unlock(&ghes_list_mutex);
break;
@@ -597,6 +934,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
{
struct ghes *ghes;
struct acpi_hest_generic *generic;
+ unsigned long len;
ghes = platform_get_drvdata(ghes_dev);
generic = ghes->generic;
@@ -620,13 +958,15 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev)
mutex_lock(&ghes_list_mutex);
list_del_rcu(&ghes->list);
if (list_empty(&ghes_nmi))
- unregister_die_notifier(&ghes_notifier_nmi);
+ unregister_nmi_handler(NMI_LOCAL, "ghes");
mutex_unlock(&ghes_list_mutex);
/*
* To synchronize with NMI handler, ghes can only be
* freed after NMI handler finishes.
*/
synchronize_rcu();
+ len = ghes_esource_prealloc_size(generic);
+ ghes_estatus_pool_shrink(len);
break;
default:
BUG();
@@ -662,15 +1002,43 @@ static int __init ghes_init(void)
return -EINVAL;
}
+ if (ghes_disable) {
+ pr_info(GHES_PFX "GHES is not enabled!\n");
+ return -EINVAL;
+ }
+
+ init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
+
rc = ghes_ioremap_init();
if (rc)
goto err;
- rc = platform_driver_register(&ghes_platform_driver);
+ rc = ghes_estatus_pool_init();
if (rc)
goto err_ioremap_exit;
+ rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
+ GHES_ESTATUS_CACHE_ALLOCED_MAX);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = platform_driver_register(&ghes_platform_driver);
+ if (rc)
+ goto err_pool_exit;
+
+ rc = apei_osc_setup();
+ if (rc == 0 && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
+ else if (rc == 0 && !osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
+ else if (rc && osc_sb_apei_support_acked)
+ pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
+ else
+ pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");
+
return 0;
+err_pool_exit:
+ ghes_estatus_pool_exit();
err_ioremap_exit:
ghes_ioremap_exit();
err:
@@ -680,6 +1048,7 @@ err:
static void __exit ghes_exit(void)
{
platform_driver_unregister(&ghes_platform_driver);
+ ghes_estatus_pool_exit();
ghes_ioremap_exit();
}
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 181bc2f..05fee06 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -231,16 +231,17 @@ void __init acpi_hest_init(void)
goto err;
}
- rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
- if (rc)
- goto err;
-
- rc = hest_ghes_dev_register(ghes_count);
- if (!rc) {
- pr_info(HEST_PFX "Table parsing has been initialized.\n");
- return;
+ if (!ghes_disable) {
+ rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
+ if (rc)
+ goto err;
+ rc = hest_ghes_dev_register(ghes_count);
+ if (rc)
+ goto err;
}
+ pr_info(HEST_PFX "Table parsing has been initialized.\n");
+ return;
err:
hest_disable = 1;
}
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index d74926e..bd230e8 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -365,6 +365,40 @@ static int amba_pm_restore_noirq(struct device *dev)
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
+#ifdef CONFIG_PM_RUNTIME
+/*
+ * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
+ * enable/disable the bus clock at runtime PM suspend/resume as this
+ * does not result in loss of context. However, disabling vcore power
+ * would cause context to be lost, so we leave that to the driver.
+ */
+static int amba_pm_runtime_suspend(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret = pm_generic_runtime_suspend(dev);
+
+ if (ret == 0 && dev->driver)
+ clk_disable(pcdev->pclk);
+
+ return ret;
+}
+
+static int amba_pm_runtime_resume(struct device *dev)
+{
+ struct amba_device *pcdev = to_amba_device(dev);
+ int ret;
+
+ if (dev->driver) {
+ ret = clk_enable(pcdev->pclk);
+ /* Failure is probably fatal to the system, but... */
+ if (ret)
+ return ret;
+ }
+
+ return pm_generic_runtime_resume(dev);
+}
+#endif
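
Under these hooks, a client driver only has to bracket hardware access with runtime PM calls to be guaranteed a running bus clock. A hedged sketch of such a client; foo_dev, the register offset, and the read helper are hypothetical, not part of this patch:

#include <linux/io.h>
#include <linux/pm_runtime.h>

struct foo_dev {
	struct device	*dev;
	void __iomem	*base;
};

static u32 foo_read_status(struct foo_dev *foo)
{
	u32 val;

	pm_runtime_get_sync(foo->dev);	/* resume: the bus re-enables pclk */
	val = readl(foo->base + 0x04);	/* safe: the bus clock is running */
	pm_runtime_put(foo->dev);	/* idle: the bus may disable pclk */

	return val;
}
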
+
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
@@ -383,8 +417,8 @@ static const struct dev_pm_ops amba_pm = {
.poweroff_noirq = amba_pm_poweroff_noirq,
.restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
- pm_generic_runtime_suspend,
- pm_generic_runtime_resume,
+ amba_pm_runtime_suspend,
+ amba_pm_runtime_resume,
pm_generic_runtime_idle
)
};
@@ -426,9 +460,17 @@ static int amba_get_enable_pclk(struct amba_device *pcdev)
if (IS_ERR(pclk))
return PTR_ERR(pclk);
+ ret = clk_prepare(pclk);
+ if (ret) {
+ clk_put(pclk);
+ return ret;
+ }
+
ret = clk_enable(pclk);
- if (ret)
+ if (ret) {
+ clk_unprepare(pclk);
clk_put(pclk);
+ }
return ret;
}
@@ -438,6 +480,7 @@ static void amba_put_disable_pclk(struct amba_device *pcdev)
struct clk *pclk = pcdev->pclk;
clk_disable(pclk);
+ clk_unprepare(pclk);
clk_put(pclk);
}
@@ -494,10 +537,18 @@ static int amba_probe(struct device *dev)
if (ret)
break;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
ret = pcdrv->probe(pcdev, id);
if (ret == 0)
break;
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
amba_put_disable_pclk(pcdev);
amba_put_disable_vcore(pcdev);
} while (0);
@@ -509,7 +560,16 @@ static int amba_remove(struct device *dev)
{
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *drv = to_amba_driver(dev->driver);
- int ret = drv->remove(pcdev);
+ int ret;
+
+ pm_runtime_get_sync(dev);
+ ret = drv->remove(pcdev);
+ pm_runtime_put_noidle(dev);
+
+ /* Undo the runtime PM settings in amba_probe() */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
amba_put_disable_vcore(pcdev);
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 34575fb..e7b3a9e 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -313,6 +313,7 @@ config PATA_AMD
config PATA_ARASAN_CF
tristate "ARASAN CompactFlash PATA Controller Support"
+ depends on DMADEVICES
select DMA_ENGINE
help
Say Y here to support the ARASAN CompactFlash PATA controller
@@ -467,6 +468,15 @@ config PATA_ICSIDE
interface card. This is not required for ICS partition support.
If you are unsure, say N to this.
+config PATA_IMX
+ tristate "PATA support for Freescale iMX"
+ depends on ARCH_MXC
+ help
+ This option enables support for the PATA host available on Freescale
+ iMX SoCs.
+
+ If unsure, say N.
+
config PATA_IT8213
tristate "IT8213 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
@@ -810,7 +820,7 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && PPC_OF
+ depends on PATA_PLATFORM && OF && OF_IRQ
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
@@ -821,6 +831,7 @@ config PATA_OF_PLATFORM
config PATA_QDI
tristate "QDI VLB PATA support"
depends on ISA
+ select PATA_LEGACY
help
Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 8ac64e1..6ece5b7 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
obj-$(CONFIG_PATA_ICSIDE) += pata_icside.o
+obj-$(CONFIG_PATA_IMX) += pata_imx.o
obj-$(CONFIG_PATA_IT8213) += pata_it8213.o
obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
@@ -87,7 +88,6 @@ obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
-obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index ae22be4..3bc8c79 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -135,8 +135,8 @@ static int acard_ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg
if (mesg.event & PM_EVENT_SUSPEND &&
hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_printk(KERN_ERR, &pdev->dev,
- "BIOS update required for suspend/resume\n");
+ dev_err(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
return -EIO;
}
@@ -187,7 +187,7 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_err(&pdev->dev,
"64-bit DMA enable failed\n");
return rc;
}
@@ -195,14 +195,13 @@ static int acard_ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -343,14 +342,12 @@ static int acard_ahci_port_start(struct ata_port *ap)
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
- dev_printk(KERN_INFO, dev,
- "port %d can do FBS, forcing FBSCP\n",
- ap->port_no);
+ dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
pp->fbs_supported = true;
} else
- dev_printk(KERN_WARNING, dev,
- "port %d is not capable of FBS\n",
- ap->port_no);
+ dev_warn(dev, "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
@@ -406,7 +403,6 @@ static int acard_ahci_port_start(struct ata_port *ap)
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = acard_ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -419,8 +415,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* acquire resources */
rc = pcim_enable_device(pdev);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index f3d09f3..e2958aa 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -52,17 +52,22 @@
#define DRV_VERSION "3.0"
enum {
- AHCI_PCI_BAR = 5,
+ AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_ENMOTUS = 2,
+ AHCI_PCI_BAR_STANDARD = 5,
};
enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_nomsi,
+ board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
/* board IDs for specific chipsets in alphabetical order */
+ board_ahci_avn,
board_ahci_mcp65,
board_ahci_mcp77,
board_ahci_mcp89,
@@ -79,10 +84,10 @@ enum board_ids {
};
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
-static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
#ifdef CONFIG_PM
@@ -104,14 +109,13 @@ static struct ata_port_operations ahci_p5wdh_ops = {
.hardreset = ahci_p5wdh_hardreset,
};
-static struct ata_port_operations ahci_sb600_ops = {
+#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
+
+static struct ata_port_operations ahci_avn_ops = {
.inherits = &ahci_ops,
- .softreset = ahci_sb600_softreset,
- .pmp_softreset = ahci_sb600_softreset,
+ .hardreset = ahci_avn_hardreset,
};
-#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
-
static const struct ata_port_info ahci_port_info[] = {
/* by features */
[board_ahci] =
@@ -129,6 +133,20 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_nomsi] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ [board_ahci_noncq] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nosntf] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
@@ -146,6 +164,12 @@ static const struct ata_port_info ahci_port_info[] = {
.port_ops = &ahci_ops,
},
/* by chipsets */
+ [board_ahci_avn] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_avn_ops,
+ },
[board_ahci_mcp65] =
{
AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
@@ -188,7 +212,7 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_pmp_retry_srst_ops,
},
[board_ahci_sb700] = /* for SB700 and SB800 */
{
@@ -196,7 +220,7 @@ static const struct ata_port_info ahci_port_info[] = {
.flags = AHCI_FLAG_COMMON,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
- .port_ops = &ahci_sb600_ops,
+ .port_ops = &ahci_pmp_retry_srst_ops,
},
[board_ahci_vt8251] =
{
@@ -268,6 +292,67 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1e06), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e07), board_ahci }, /* Panther Point RAID */
{ PCI_VDEVICE(INTEL, 0x1e0e), board_ahci }, /* Panther Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c02), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c03), board_ahci }, /* Lynx Point AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c04), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c05), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c06), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */
+ { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x1f22), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f23), board_ahci }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f24), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f25), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f26), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
+ { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
+ { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d04), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d06), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d0e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d62), board_ahci }, /* Wellsburg AHCI */
+ { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c83), board_ahci }, /* Wildcat Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */
+ { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -384,24 +469,41 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+ /* ST Microelectronics */
+ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
+
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
- { PCI_DEVICE(0x1b4b, 0x9123),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123),
.class = PCI_CLASS_STORAGE_SATA_AHCI,
.class_mask = 0xffffff,
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
- { PCI_DEVICE(0x1b4b, 0x9125),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
- { PCI_DEVICE(0x1b4b, 0x917a),
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
- { PCI_DEVICE(0x1b4b, 0x9192),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9182 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */
- { PCI_DEVICE(0x1b4b, 0x91a3),
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0),
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ .driver_data = board_ahci_yes_fbs },
+ { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
.driver_data = board_ahci_yes_fbs },
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
@@ -409,6 +511,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /*
+	 * Samsung SSDs found on some MacBooks. NCQ times out if MSI is
+ * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ */
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
+
+ /* Enmotus */
+ { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
+
/* Generic, PCI class code for AHCI */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
@@ -514,55 +626,6 @@ static void ahci_pci_init_controller(struct ata_host *host)
ahci_init_controller(host);
}
-static int ahci_sb600_check_ready(struct ata_link *link)
-{
- void __iomem *port_mmio = ahci_port_base(link->ap);
- u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
- u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
-
- /*
- * There is no need to check TFDATA if BAD PMP is found due to HW bug,
- * which can save timeout delay.
- */
- if (irq_status & PORT_IRQ_BAD_PMP)
- return -EIO;
-
- return ata_check_ready(status);
-}
-
-static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- void __iomem *port_mmio = ahci_port_base(ap);
- int pmp = sata_srst_pmp(link);
- int rc;
- u32 irq_sts;
-
- DPRINTK("ENTER\n");
-
- rc = ahci_do_softreset(link, class, pmp, deadline,
- ahci_sb600_check_ready);
-
- /*
- * Soft reset fails on some ATI chips with IPMS set when PMP
- * is enabled but SATA HDD/ODD is connected to SATA port,
- * do soft reset again to port 0.
- */
- if (rc == -EIO) {
- irq_sts = readl(port_mmio + PORT_IRQ_STAT);
- if (irq_sts & PORT_IRQ_BAD_PMP) {
- ata_link_printk(link, KERN_WARNING,
- "applying SB600 PMP SRST workaround "
- "and retrying\n");
- rc = ahci_do_softreset(link, class, 0, deadline,
- ahci_check_ready);
- }
- }
-
- return rc;
-}
-
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
@@ -631,6 +694,78 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
return rc;
}
+/*
+ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
+ *
+ * It has been observed with some SSDs that the timing of events in the
+ * link synchronization phase can leave the port in a state that cannot
+ * be recovered by a SATA hard reset alone. The failing signature is
+ * SStatus.DET stuck at 1 ("Device presence detected but Phy
+ * communication not established"). It was found that unloading and
+ * reloading the driver when this problem occurs allows the drive
+ * connection to be recovered (DET advanced to 0x3). The critical
+ * component of reloading the driver is that the port state machines are
+ * reset by bouncing "port enable" in the AHCI PCS configuration
+ * register. So, reproduce that effect by bouncing a port whenever we
+ * see DET==1 after a reset.
+ */
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct ata_port *ap = link->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+ u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ unsigned long tmo = deadline - jiffies;
+ struct ata_taskfile tf;
+ bool online;
+ int rc, i;
+
+ DPRINTK("ENTER\n");
+
+ ahci_stop_engine(ap);
+
+ for (i = 0; i < 2; i++) {
+ u16 val;
+ u32 sstatus;
+ int port = ap->port_no;
+ struct ata_host *host = ap->host;
+ struct pci_dev *pdev = to_pci_dev(host->dev);
+
+ /* clear D2H reception area to properly wait for D2H FIS */
+ ata_tf_init(link->device, &tf);
+ tf.command = ATA_BUSY;
+ ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+ rc = sata_link_hardreset(link, timing, deadline, &online,
+ ahci_check_ready);
+
+ if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
+ (sstatus & 0xf) != 1)
+ break;
+
+ ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
+ port);
+
+ pci_read_config_word(pdev, 0x92, &val);
+ val &= ~(1 << port);
+ pci_write_config_word(pdev, 0x92, val);
+ ata_msleep(ap, 1000);
+ val |= 1 << port;
+ pci_write_config_word(pdev, 0x92, val);
+ deadline += tmo;
+ }
+
+ ahci_start_engine(ap);
+
+ if (online)
+ *class = ahci_dev_classify(ap);
+
+ DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+ return rc;
+}
+
#ifdef CONFIG_PM
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
@@ -641,8 +776,8 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
if (mesg.event & PM_EVENT_SUSPEND &&
hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
- dev_printk(KERN_ERR, &pdev->dev,
- "BIOS update required for suspend/resume\n");
+ dev_err(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
return -EIO;
}
@@ -687,28 +822,34 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
+ /*
+ * If the device fixup already set the dma_mask to some non-standard
+ * value, don't extend it here. This happens on STA2X11, for example.
+ */
+ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
+ return 0;
+
if (using_dac &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -771,8 +912,8 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
dmi_check_system(sysids)) {
struct ata_port *ap = host->ports[1];
- dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
- "Deluxe on-board SIMG4726 workaround\n");
+ dev_info(&pdev->dev,
+ "enabling ASUS P5W DH Deluxe on-board SIMG4726 workaround\n");
ap->ops = &ahci_p5wdh_ops;
ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
@@ -855,14 +996,14 @@ static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
if (strcmp(buf, match->driver_data) >= 0)
goto enable_64bit;
else {
- dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
- "forcing 32bit DMA, update BIOS\n", match->ident);
+ dev_warn(&pdev->dev,
+ "%s: BIOS too old, forcing 32bit DMA, update BIOS\n",
+ match->ident);
return false;
}
enable_64bit:
- dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
- match->ident);
+ dev_warn(&pdev->dev, "%s: enabling 64bit DMA\n", match->ident);
return true;
}
@@ -1065,9 +1206,8 @@ static void ahci_gtf_filter_workaround(struct ata_host *host)
return;
filter = (unsigned long)dmi->driver_data;
- dev_printk(KERN_INFO, host->dev,
- "applying extra ACPI _GTF filter 0x%x for %s\n",
- filter, dmi->ident);
+ dev_info(host->dev, "applying extra ACPI _GTF filter 0x%x for %s\n",
+ filter, dmi->ident);
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
@@ -1086,7 +1226,6 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_id = ent->driver_data;
struct ata_port_info pi = ahci_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1094,13 +1233,13 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
+ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
VPRINTK("ENTER\n");
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* The AHCI driver can only drive the SATA ports, the PATA driver
can drive them all so if both drivers are selected make sure
@@ -1123,8 +1262,14 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* that for SAS drives they're out of luck.
*/
if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
- dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
- "can only drive SATA devices with this driver\n");
+ dev_info(&pdev->dev,
+ "PDC42819 can only drive SATA devices with this driver\n");
+
+ /* Both Connext and Enmotus devices use non-standard BARs */
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+ else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+ ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -1134,7 +1279,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1150,8 +1295,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
pci_read_config_byte(pdev, ICH_MAP, &map);
if (map & 0x3) {
- dev_printk(KERN_INFO, &pdev->dev, "controller is in "
- "combined mode, can't enable AHCI mode\n");
+ dev_info(&pdev->dev,
+ "controller is in combined mode, can't enable AHCI mode\n");
return -ENODEV;
}
}
@@ -1177,7 +1322,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1208,8 +1353,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
- dev_printk(KERN_WARNING, &pdev->dev,
- "BIOS update required for suspend/resume\n");
+ dev_warn(&pdev->dev,
+ "BIOS update required for suspend/resume\n");
}
if (ahci_broken_online(pdev)) {
@@ -1241,8 +1386,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
+ ata_port_pbar_desc(ap, ahci_pci_bar,
0x100 + ap->port_no * 0x80, "port");
/* set enclosure management message type */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 12c5282..cdf58f7 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -312,7 +312,9 @@ extern struct device_attribute *ahci_sdev_attrs[];
.sdev_attrs = ahci_sdev_attrs
extern struct ata_port_operations ahci_ops;
+extern struct ata_port_operations ahci_pmp_retry_srst_ops;
+unsigned int ahci_dev_classify(struct ata_port *ap);
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts);
void ahci_save_initial_config(struct device *dev,
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 6fef1fa..6692108 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -23,6 +23,41 @@
#include <linux/ahci_platform.h>
#include "ahci.h"
+enum ahci_type {
+ AHCI, /* standard platform ahci */
+ IMX53_AHCI, /* ahci on i.mx53 */
+};
+
+static struct platform_device_id ahci_devtype[] = {
+ {
+ .name = "ahci",
+ .driver_data = AHCI,
+ }, {
+ .name = "imx53-ahci",
+ .driver_data = IMX53_AHCI,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, ahci_devtype);
+
+static const struct ata_port_info ahci_port_info[] = {
+ /* by features */
+ [AHCI] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
+ [IMX53_AHCI] = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_pmp_retry_srst_ops,
+ },
+};
+
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT("ahci_platform"),
};
@@ -30,13 +65,9 @@ static struct scsi_host_template ahci_platform_sht = {
static int __init ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ahci_platform_data *pdata = dev->platform_data;
- struct ata_port_info pi = {
- .flags = AHCI_FLAG_COMMON,
- .pio_mask = ATA_PIO4,
- .udma_mask = ATA_UDMA6,
- .port_ops = &ahci_ops,
- };
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ struct ata_port_info pi = ahci_port_info[id ? id->driver_data : 0];
const struct ata_port_info *ppi[] = { &pi, NULL };
struct ahci_host_priv *hpriv;
struct ata_host *host;
@@ -160,7 +191,7 @@ err0:
static int __devexit ahci_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ahci_platform_data *pdata = dev->platform_data;
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
struct ata_host *host = dev_get_drvdata(dev);
ata_host_detach(host);
@@ -171,12 +202,21 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "calxeda,hb-ahci", },
+ { .compatible = "ibm,476gtr-ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
static struct platform_driver ahci_driver = {
.remove = __devexit_p(ahci_remove),
.driver = {
.name = "ahci",
.owner = THIS_MODULE,
+ .of_match_table = ahci_of_match,
},
+ .id_table = ahci_devtype,
};
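
With the id_table in place, instantiating the i.MX53 variant is a matter of registering a platform device whose name matches the "imx53-ahci" entry, which selects ahci_pmp_retry_srst_ops from ahci_port_info[]. A sketch of such board code; the MMIO window and IRQ number below are placeholders, not real i.MX53 values:

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource imx53_ahci_resources[] = {
	DEFINE_RES_MEM(0x10000000, 0x1000),	/* placeholder MMIO window */
	DEFINE_RES_IRQ(28),			/* placeholder IRQ number */
};

static struct platform_device imx53_ahci_device = {
	.name		= "imx53-ahci",	/* matched against ahci_devtype[] */
	.id		= -1,
	.resource	= imx53_ahci_resources,
	.num_resources	= ARRAY_SIZE(imx53_ahci_resources),
};

/* board init would then call platform_device_register(&imx53_ahci_device); */
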
static int __init ahci_init(void)
diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
index 721d38b..7df56ec 100644
--- a/drivers/ata/ata_generic.c
+++ b/drivers/ata/ata_generic.c
@@ -81,14 +81,13 @@ static int generic_set_mode(struct ata_link *link, struct ata_device **unused)
xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0);
}
- ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
- name);
+ ata_dev_info(dev, "configured for %s\n", name);
dev->xfer_mode = ata_xfer_mask2mode(xfer_mask);
dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode);
dev->flags &= ~ATA_DFLAG_PIO;
} else {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 6da6deb..b1e8e11 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -150,6 +150,8 @@ enum piix_controller_ids {
tolapai_sata,
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
+ ich8_2port_sata_snb,
+ ich8_2port_sata_byt,
};
struct piix_map_db {
@@ -321,6 +323,54 @@ static const struct pci_device_id piix_pci_tbl[] = {
{ 0x8086, 0x1e08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Panther Point) */
{ 0x8086, 0x1e09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Lynx Point) */
+ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Lynx Point-LP) */
+ { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (DH89xxCC) */
+ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f30, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Avoton) */
+ { 0x8086, 0x1f31, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (Wellsburg) */
+ { 0x8086, 0x8d68, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (BayTrail) */
+ { 0x8086, 0x0F20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt },
+ /* SATA Controller IDE (Coleto Creek) */
+ { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+ /* SATA Controller IDE (9 Series) */
+ { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
+
{ } /* terminate list */
};
@@ -484,6 +534,8 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8m_apple_sata] = &ich8m_apple_map_db,
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
+ [ich8_2port_sata_snb] = &ich8_2port_map_db,
+ [ich8_2port_sata_byt] = &ich8_2port_map_db,
};
static struct ata_port_info piix_port_info[] = {
@@ -625,6 +677,25 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
+ [ich8_2port_sata_snb] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
+ | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
+ [ich8_2port_sata_byt] =
+ {
+ .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR | PIIX_FLAG_PIO16,
+ .pio_mask = ATA_PIO4,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &piix_sata_ops,
+ },
+
};
static struct pci_bits piix_enable_bits[] = {
@@ -731,22 +802,11 @@ static int piix_pata_prereset(struct ata_link *link, unsigned long deadline)
static DEFINE_SPINLOCK(piix_lock);
-/**
- * piix_set_piomode - Initialize host controller PATA PIO timings
- * @ap: Port whose timings we are configuring
- * @adev: um
- *
- * Set PIO mode for device, in host controller PCI config space.
- *
- * LOCKING:
- * None (inherited from caller).
- */
-
-static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void piix_set_timings(struct ata_port *ap, struct ata_device *adev,
+ u8 pio)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
- unsigned int pio = adev->pio_mode - XFER_PIO_0;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
@@ -771,10 +831,16 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE enable */
-
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
+ /*
+	 * If the drive's MWDMA mode is faster than the PIO mode it can
+	 * support, we must force the PIO timings into PIO0.
+ */
+ if (adev->pio_mode < XFER_PIO_0 + pio)
+ /* Enable DMA timing only */
+ control |= 8; /* PIO cycles in PIO0 */
spin_lock_irqsave(&piix_lock, flags);
@@ -786,8 +852,6 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (is_slave) {
/* clear TIME1|IE1|PPE1|DTE1 */
master_data &= 0xff0f;
- /* Enable SITRE (separate slave timing register) */
- master_data |= 0x4000;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
@@ -805,6 +869,9 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
+
+ /* Enable SITRE (separate slave timing register) */
+ master_data |= 0x4000;
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
@@ -822,6 +889,22 @@ static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/**
+ * piix_set_piomode - Initialize host controller PATA PIO timings
+ * @ap: Port whose timings we are configuring
+ * @adev: Drive in question
+ *
+ * Set PIO mode for device, in host controller PCI config space.
+ *
+ * LOCKING:
+ * None (inherited from caller).
+ */
+
+static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ piix_set_timings(ap, adev, adev->pio_mode - XFER_PIO_0);
+}
+
+/**
* do_pata_set_dmamode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
@@ -837,31 +920,20 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned long flags;
- u8 master_port = ap->port_no ? 0x42 : 0x40;
- u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable = 0;
- static const /* ISP RTC */
- u8 timings[][2] = { { 0, 0 },
- { 0, 0 },
- { 1, 0 },
- { 2, 1 },
- { 2, 3 }, };
-
- spin_lock_irqsave(&piix_lock, flags);
-
- pci_read_config_word(dev, master_port, &master_data);
- if (ap->udma_mask)
- pci_read_config_byte(dev, 0x48, &udma_enable);
-
if (speed >= XFER_UDMA_0) {
- unsigned int udma = adev->dma_mode - XFER_UDMA_0;
+ unsigned int udma = speed - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
+ spin_lock_irqsave(&piix_lock, flags);
+
+ pci_read_config_byte(dev, 0x48, &udma_enable);
+
/*
* UDMA is handled by a combination of clock switching and
* selection of dividers
@@ -894,56 +966,21 @@ static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, in
performance (WR_PingPong_En) */
pci_write_config_word(dev, 0x54, ideconf);
}
+
+ pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&piix_lock, flags);
} else {
- /*
- * MWDMA is driven by the PIO timings. We must also enable
- * IORDY unconditionally along with TIME1. PPE has already
- * been set when the PIO timing was set.
- */
- unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
- unsigned int control;
- u8 slave_data;
+ /* MWDMA is driven by the PIO timings. */
+ unsigned int mwdma = speed - XFER_MW_DMA_0;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
- control = 3; /* IORDY|TIME1 */
-
- /* If the drive MWDMA is faster than it can do PIO then
- we must force PIO into PIO0 */
-
- if (adev->pio_mode < needed_pio[mwdma])
- /* Enable DMA timing only */
- control |= 8; /* PIO cycles in PIO0 */
-
- if (adev->devno) { /* Slave */
- master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
- master_data |= control << 4;
- pci_read_config_byte(dev, 0x44, &slave_data);
- slave_data &= (ap->port_no ? 0x0f : 0xf0);
- /* Load the matching timing */
- slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
- pci_write_config_byte(dev, 0x44, slave_data);
- } else { /* Master */
- master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
- and master timing bits */
- master_data |= control;
- master_data |=
- (timings[pio][0] << 12) |
- (timings[pio][1] << 8);
- }
-
- if (ap->udma_mask)
- udma_enable &= ~(1 << devid);
-
- pci_write_config_word(dev, master_port, master_data);
+ /* XFER_PIO_0 is never used currently */
+ piix_set_timings(ap, adev, pio);
}
- /* Don't scribble on 0x48 if the controller does not support UDMA */
- if (ap->udma_mask)
- pci_write_config_byte(dev, 0x48, udma_enable);
-
- spin_unlock_irqrestore(&piix_lock, flags);
}
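
The needed_pio[] table encodes the constraint the kept comment states: PIIX has no separate MWDMA timing registers, so each MWDMA mode borrows the PIO timing fields of the PIO mode with matching cycle times. The mapping in isolation (the helper name is invented for illustration):

/* MWDMA0 borrows PIO0 timings, MWDMA1 borrows PIO3, MWDMA2 borrows PIO4 */
static int piix_mwdma_needed_pio(unsigned int mwdma)
{
	static const int needed_pio[3] = { 0, 3, 4 };

	return needed_pio[mwdma];	/* e.g. MWDMA2 -> PIO4 timings */
}
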
/**
@@ -1252,8 +1289,9 @@ static int piix_pci_device_resume(struct pci_dev *pdev)
*/
rc = pci_reenable_device(pdev);
if (rc)
- dev_printk(KERN_ERR, &pdev->dev, "failed to enable "
- "device after resume (%d)\n", rc);
+ dev_err(&pdev->dev,
+ "failed to enable device after resume (%d)\n",
+ rc);
} else
rc = ata_pci_device_do_resume(pdev);
@@ -1330,9 +1368,11 @@ static int __devinit piix_check_450nx_errata(struct pci_dev *ata_dev)
no_piix_dma = 2;
}
if (no_piix_dma)
- dev_printk(KERN_WARNING, &ata_dev->dev, "450NX errata present, disabling IDE DMA.\n");
- if (no_piix_dma == 2)
- dev_printk(KERN_WARNING, &ata_dev->dev, "A BIOS update may resolve this.\n");
+ dev_warn(&ata_dev->dev,
+ "450NX errata present, disabling IDE DMA%s\n",
+ no_piix_dma == 2 ? " - a BIOS update may resolve this"
+ : "");
+
return no_piix_dma;
}
@@ -1365,37 +1405,36 @@ static const int *__devinit piix_init_sata_map(struct pci_dev *pdev,
map = map_db->map[map_value & map_db->mask];
- dev_printk(KERN_INFO, &pdev->dev, "MAP [");
+ dev_info(&pdev->dev, "MAP [");
for (i = 0; i < 4; i++) {
switch (map[i]) {
case RV:
invalid_map = 1;
- printk(" XX");
+ pr_cont(" XX");
break;
case NA:
- printk(" --");
+ pr_cont(" --");
break;
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich_pata_100];
i++;
- printk(" IDE IDE");
+ pr_cont(" IDE IDE");
break;
default:
- printk(" P%d", map[i]);
+ pr_cont(" P%d", map[i]);
if (i & 1)
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
- printk(" ]\n");
+ pr_cont(" ]\n");
if (invalid_map)
- dev_printk(KERN_ERR, &pdev->dev,
- "invalid MAP value %u\n", map_value);
+ dev_err(&pdev->dev, "invalid MAP value %u\n", map_value);
return map;
}
@@ -1425,8 +1464,8 @@ static bool piix_no_sidpr(struct ata_host *host)
if (pdev->vendor == PCI_VENDOR_ID_INTEL && pdev->device == 0x2920 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
pdev->subsystem_device == 0xb049) {
- dev_printk(KERN_WARNING, host->dev,
- "Samsung DB-P70 detected, disabling SIDPR\n");
+ dev_warn(host->dev,
+ "Samsung DB-P70 detected, disabling SIDPR\n");
return true;
}
@@ -1478,8 +1517,8 @@ static int __devinit piix_init_sidpr(struct ata_host *host)
piix_sidpr_scr_read(link0, SCR_CONTROL, &scontrol);
if ((scontrol & 0xf00) != 0x300) {
- dev_printk(KERN_INFO, host->dev, "SCR access via "
- "SIDPR is available but doesn't work\n");
+ dev_info(host->dev,
+ "SCR access via SIDPR is available but doesn't work\n");
return 0;
}
}
@@ -1528,8 +1567,7 @@ static void piix_iocfg_bit18_quirk(struct ata_host *host)
* affected systems.
*/
if (hpriv->saved_iocfg & (1 << 18)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "applying IOCFG bit18 quirk\n");
+ dev_info(&pdev->dev, "applying IOCFG bit18 quirk\n");
pci_write_config_dword(pdev, PIIX_IOCFG,
hpriv->saved_iocfg & ~(1 << 18));
}
@@ -1588,7 +1626,6 @@ static bool piix_broken_system_poweroff(struct pci_dev *pdev)
static int __devinit piix_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
@@ -1598,9 +1635,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev,
struct piix_host_priv *hpriv;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* no hotplugging support for later devices (FIXME) */
if (!in_module_init && ent->driver_data >= ich5_sata)
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index b64e4a7..41ffb8c 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -82,6 +82,8 @@ static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
+static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
@@ -178,6 +180,12 @@ struct ata_port_operations ahci_ops = {
};
EXPORT_SYMBOL_GPL(ahci_ops);
+struct ata_port_operations ahci_pmp_retry_srst_ops = {
+ .inherits = &ahci_ops,
+ .softreset = ahci_pmp_retry_softreset,
+};
+EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
+
int ahci_em_messages = 1;
EXPORT_SYMBOL_GPL(ahci_em_messages);
module_param(ahci_em_messages, int, 0444);
@@ -286,10 +294,10 @@ static ssize_t ahci_read_em_buffer(struct device *dev,
/* the count should not be larger than PAGE_SIZE */
if (count > PAGE_SIZE) {
if (printk_ratelimit())
- ata_port_printk(ap, KERN_WARNING,
- "EM read buffer size too large: "
- "buffer size %u, page size %lu\n",
- hpriv->em_buf_sz, PAGE_SIZE);
+ ata_port_warn(ap,
+ "EM read buffer size too large: "
+ "buffer size %u, page size %lu\n",
+ hpriv->em_buf_sz, PAGE_SIZE);
count = PAGE_SIZE;
}
@@ -410,51 +418,46 @@ void ahci_save_initial_config(struct device *dev,
/* some chips have errata preventing 64bit use */
if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do 64bit DMA, forcing 32bit\n");
+ dev_info(dev, "controller can't do 64bit DMA, forcing 32bit\n");
cap &= ~HOST_CAP_64;
}
if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do NCQ, turning off CAP_NCQ\n");
+ dev_info(dev, "controller can't do NCQ, turning off CAP_NCQ\n");
cap &= ~HOST_CAP_NCQ;
}
if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
- dev_printk(KERN_INFO, dev,
- "controller can do NCQ, turning on CAP_NCQ\n");
+ dev_info(dev, "controller can do NCQ, turning on CAP_NCQ\n");
cap |= HOST_CAP_NCQ;
}
if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do PMP, turning off CAP_PMP\n");
+ dev_info(dev, "controller can't do PMP, turning off CAP_PMP\n");
cap &= ~HOST_CAP_PMP;
}
if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
- dev_printk(KERN_INFO, dev,
- "controller can't do SNTF, turning off CAP_SNTF\n");
+ dev_info(dev,
+ "controller can't do SNTF, turning off CAP_SNTF\n");
cap &= ~HOST_CAP_SNTF;
}
if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
- dev_printk(KERN_INFO, dev,
- "controller can do FBS, turning on CAP_FBS\n");
+ dev_info(dev, "controller can do FBS, turning on CAP_FBS\n");
cap |= HOST_CAP_FBS;
}
if (force_port_map && port_map != force_port_map) {
- dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
- port_map, force_port_map);
+ dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ port_map, force_port_map);
port_map = force_port_map;
}
if (mask_port_map) {
- dev_printk(KERN_WARNING, dev, "masking port_map 0x%x -> 0x%x\n",
- port_map,
- port_map & mask_port_map);
+ dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
+ port_map,
+ port_map & mask_port_map);
port_map &= mask_port_map;
}
@@ -470,10 +473,9 @@ void ahci_save_initial_config(struct device *dev,
* port_map and let it be generated from n_ports.
*/
if (map_ports > ahci_nr_ports(cap)) {
- dev_printk(KERN_WARNING, dev,
- "implemented port map (0x%x) contains more "
- "ports than nr_ports (%u), using nr_ports\n",
- port_map, ahci_nr_ports(cap));
+ dev_warn(dev,
+ "implemented port map (0x%x) contains more ports than nr_ports (%u), using nr_ports\n",
+ port_map, ahci_nr_ports(cap));
port_map = 0;
}
}
@@ -481,8 +483,7 @@ void ahci_save_initial_config(struct device *dev,
/* fabricate port_map from cap.nr_ports */
if (!port_map) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
- dev_printk(KERN_WARNING, dev,
- "forcing PORTS_IMPL to 0x%x\n", port_map);
+ dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
/* write the fixed up value to the PI register */
hpriv->saved_port_map = port_map;
@@ -822,8 +823,8 @@ int ahci_reset_controller(struct ata_host *host)
HOST_RESET, 10, 1000);
if (tmp & HOST_RESET) {
- dev_printk(KERN_ERR, host->dev,
- "controller reset failed (0x%x)\n", tmp);
+ dev_err(host->dev, "controller reset failed (0x%x)\n",
+ tmp);
return -EIO;
}
@@ -835,8 +836,7 @@ int ahci_reset_controller(struct ata_host *host)
*/
ahci_restore_initial_config(host);
} else
- dev_printk(KERN_INFO, host->dev,
- "skipping global host reset\n");
+ dev_info(host->dev, "skipping global host reset\n");
return 0;
}
@@ -1132,12 +1132,12 @@ static void ahci_dev_config(struct ata_device *dev)
if (hpriv->flags & AHCI_HFLAG_SECT255) {
dev->max_sectors = 255;
- ata_dev_printk(dev, KERN_INFO,
- "SB600 AHCI: limiting to 255 sectors per cmd\n");
+ ata_dev_info(dev,
+ "SB600 AHCI: limiting to 255 sectors per cmd\n");
}
}
-static unsigned int ahci_dev_classify(struct ata_port *ap)
+unsigned int ahci_dev_classify(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
struct ata_taskfile tf;
@@ -1151,6 +1151,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap)
return ata_dev_classify(&tf);
}
+EXPORT_SYMBOL_GPL(ahci_dev_classify);
void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
u32 opts)
@@ -1247,9 +1248,11 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
{
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
+ struct ahci_port_priv *pp = ap->private_data;
const char *reason = NULL;
unsigned long now, msecs;
struct ata_taskfile tf;
+ bool fbs_disabled = false;
int rc;
DPRINTK("ENTER\n");
@@ -1257,8 +1260,17 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING,
- "failed to reset engine (errno=%d)\n", rc);
+ ata_link_warn(link, "failed to reset engine (errno=%d)\n", rc);
+
+ /*
+	 * According to AHCI-1.2 9.3.9: if FBS is enabled, software shall
+	 * clear PxFBS.EN to '0' prior to issuing a software reset to
+	 * devices that are attached to a port multiplier.
+ */
+ if (!ata_is_host_link(link) && pp->fbs_enabled) {
+ ahci_disable_fbs(ap);
+ fbs_disabled = true;
+ }
ata_tf_init(link->device, &tf);
@@ -1291,8 +1303,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
* be trusted. Treat device readiness timeout as link
* offline.
*/
- ata_link_printk(link, KERN_INFO,
- "device not ready, treating as offline\n");
+ ata_link_info(link, "device not ready, treating as offline\n");
*class = ATA_DEV_NONE;
} else if (rc) {
/* link occupied, -ENODEV too is an error */
@@ -1301,11 +1312,15 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
} else
*class = ahci_dev_classify(ap);
+ /* re-enable FBS if disabled before */
+ if (fbs_disabled)
+ ahci_enable_fbs(ap);
+
DPRINTK("EXIT, class=%u\n", *class);
return 0;
fail:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ ata_link_err(link, "softreset failed (%s)\n", reason);
return rc;
}
@@ -1329,6 +1344,55 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);
+static int ahci_bad_pmp_check_ready(struct ata_link *link)
+{
+ void __iomem *port_mmio = ahci_port_base(link->ap);
+ u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+ u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
+
+ /*
+ * If BAD PMP is found due to the HW bug, there is no need to check
+ * TFDATA; skipping the check saves the timeout delay.
+ */
+ if (irq_status & PORT_IRQ_BAD_PMP)
+ return -EIO;
+
+ return ata_check_ready(status);
+}
+
+int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ int pmp = sata_srst_pmp(link);
+ int rc;
+ u32 irq_sts;
+
+ DPRINTK("ENTER\n");
+
+ rc = ahci_do_softreset(link, class, pmp, deadline,
+ ahci_bad_pmp_check_ready);
+
+ /*
+ * Soft reset fails with IPMS set when PMP is enabled but a
+ * SATA HDD/ODD is connected to the SATA port; do the soft
+ * reset again against port 0.
+ */
+ if (rc == -EIO) {
+ irq_sts = readl(port_mmio + PORT_IRQ_STAT);
+ if (irq_sts & PORT_IRQ_BAD_PMP) {
+ ata_link_warn(link,
+ "applying PMP SRST workaround and retrying\n");
+ rc = ahci_do_softreset(link, class, 0, deadline,
+ ahci_check_ready);
+ }
+ }
+
+ return rc;
+}
+
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
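
ahci_pmp_retry_softreset() above exists for controllers whose PMP-aware SRST raises IPMS when a plain HDD/ODD, rather than a port multiplier, is attached. A hypothetical wiring sketch (quirky_ahci_ops is invented for illustration): an affected chip would plug the helper in as its softreset and pmp_softreset methods:

	static struct ata_port_operations quirky_ahci_ops = {
		.inherits	= &ahci_ops,
		.softreset	= ahci_pmp_retry_softreset,
		.pmp_softreset	= ahci_pmp_retry_softreset,
	};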
@@ -1474,8 +1538,7 @@ static void ahci_fbs_dec_intr(struct ata_port *ap)
}
if (fbs & PORT_FBS_DEC)
- dev_printk(KERN_ERR, ap->host->dev,
- "failed to clear device error\n");
+ dev_err(ap->host->dev, "failed to clear device error\n");
}
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
@@ -1606,8 +1669,7 @@ static void ahci_port_intr(struct ata_port *ap)
if (unlikely(resetting))
status &= ~PORT_IRQ_BAD_PMP;
- /* if LPM is enabled, PHYRDY doesn't mean anything */
- if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
+ if (sata_lpm_ignore_phy_events(&ap->link)) {
status &= ~PORT_IRQ_PHYRDY;
ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
}
@@ -1712,8 +1774,8 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port %u\n", i);
+ dev_warn(host->dev,
+ "interrupt on disabled port %u\n", i);
}
handled = 1;
@@ -1864,11 +1926,11 @@ static void ahci_enable_fbs(struct ata_port *ap)
writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN) {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ dev_info(ap->host->dev, "FBS is enabled\n");
pp->fbs_enabled = true;
pp->fbs_last_dev = -1; /* initialization */
} else
- dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+ dev_err(ap->host->dev, "Failed to enable FBS\n");
ahci_start_engine(ap);
}
@@ -1896,9 +1958,9 @@ static void ahci_disable_fbs(struct ata_port *ap)
writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
fbs = readl(port_mmio + PORT_FBS);
if (fbs & PORT_FBS_EN)
- dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ dev_err(ap->host->dev, "Failed to disable FBS\n");
else {
- dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ dev_info(ap->host->dev, "FBS is disabled\n");
pp->fbs_enabled = false;
}
@@ -1974,7 +2036,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
if (rc == 0)
ahci_power_down(ap);
else {
- ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
+ ata_port_err(ap, "%s (%d)\n", emsg, rc);
ahci_start_port(ap);
}
@@ -2002,14 +2064,12 @@ static int ahci_port_start(struct ata_port *ap)
if (cmd & PORT_CMD_FBSCP)
pp->fbs_supported = true;
else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
- dev_printk(KERN_INFO, dev,
- "port %d can do FBS, forcing FBSCP\n",
- ap->port_no);
+ dev_info(dev, "port %d can do FBS, forcing FBSCP\n",
+ ap->port_no);
pp->fbs_supported = true;
} else
- dev_printk(KERN_WARNING, dev,
- "port %d is not capable of FBS\n",
- ap->port_no);
+ dev_warn(dev, "port %d is not capable of FBS\n",
+ ap->port_no);
}
if (pp->fbs_supported) {
@@ -2071,7 +2131,7 @@ static void ahci_port_stop(struct ata_port *ap)
/* de-initialize port */
rc = ahci_deinit_port(ap, &emsg);
if (rc)
- ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
+ ata_port_warn(ap, "%s (%d)\n", emsg, rc);
}
void ahci_print_info(struct ata_host *host, const char *scc_s)
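
Most hunks in this file are mechanical conversions from dev_printk(KERN_<LEVEL>, dev, ...) to the level-named helpers, so the log output is unchanged while the call sites shrink. Roughly (a sketch, not necessarily the exact definitions in this kernel), the helpers expand back to dev_printk with the level baked in:

	#define dev_info(dev, fmt, ...) \
		dev_printk(KERN_INFO, dev, fmt, ##__VA_ARGS__)
	#define dev_warn(dev, fmt, ...) \
		dev_printk(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
	#define dev_err(dev, fmt, ...) \
		dev_printk(KERN_ERR, dev, fmt, ##__VA_ARGS__)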
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index a791b8ce..bb7c5f1 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -218,12 +218,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
ata_acpi_uevent(dev->link->ap, dev, event);
}
-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
.handler = ata_acpi_dev_notify_dock,
.uevent = ata_acpi_dev_uevent,
};
-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
.handler = ata_acpi_ap_notify_dock,
.uevent = ata_acpi_ap_uevent,
};
@@ -332,25 +332,22 @@ int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
rc = -EINVAL;
if (ACPI_FAILURE(status)) {
- ata_port_printk(ap, KERN_ERR,
- "ACPI get timing mode failed (AE 0x%x)\n",
- status);
+ ata_port_err(ap, "ACPI get timing mode failed (AE 0x%x)\n",
+ status);
goto out_free;
}
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
- ata_port_printk(ap, KERN_WARNING,
- "_GTM returned unexpected object type 0x%x\n",
- out_obj->type);
+ ata_port_warn(ap, "_GTM returned unexpected object type 0x%x\n",
+ out_obj->type);
goto out_free;
}
if (out_obj->buffer.length != sizeof(struct ata_acpi_gtm)) {
- ata_port_printk(ap, KERN_ERR,
- "_GTM returned invalid length %d\n",
- out_obj->buffer.length);
+ ata_port_err(ap, "_GTM returned invalid length %d\n",
+ out_obj->buffer.length);
goto out_free;
}
@@ -402,8 +399,8 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
if (status == AE_NOT_FOUND)
return -ENOENT;
if (ACPI_FAILURE(status)) {
- ata_port_printk(ap, KERN_ERR,
- "ACPI set timing mode failed (status=0x%x)\n", status);
+ ata_port_err(ap, "ACPI set timing mode failed (status=0x%x)\n",
+ status);
return -EINVAL;
}
return 0;
@@ -450,8 +447,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER: port#: %d\n",
- __func__, ap->port_no);
+ ata_dev_dbg(dev, "%s: ENTER: port#: %d\n",
+ __func__, ap->port_no);
/* _GTF has no input parameters */
status = acpi_evaluate_object(dev->acpi_handle, "_GTF", NULL, &output);
@@ -459,9 +456,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
if (ACPI_FAILURE(status)) {
if (status != AE_NOT_FOUND) {
- ata_dev_printk(dev, KERN_WARNING,
- "_GTF evaluation failed (AE 0x%x)\n",
- status);
+ ata_dev_warn(dev, "_GTF evaluation failed (AE 0x%x)\n",
+ status);
rc = -EINVAL;
}
goto out_free;
@@ -469,27 +465,24 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
if (!output.length || !output.pointer) {
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: Run _GTF: "
- "length or ptr is NULL (0x%llx, 0x%p)\n",
- __func__,
- (unsigned long long)output.length,
- output.pointer);
+ ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
+ __func__,
+ (unsigned long long)output.length,
+ output.pointer);
rc = -EINVAL;
goto out_free;
}
if (out_obj->type != ACPI_TYPE_BUFFER) {
- ata_dev_printk(dev, KERN_WARNING,
- "_GTF unexpected object type 0x%x\n",
- out_obj->type);
+ ata_dev_warn(dev, "_GTF unexpected object type 0x%x\n",
+ out_obj->type);
rc = -EINVAL;
goto out_free;
}
if (out_obj->buffer.length % REGS_PER_GTF) {
- ata_dev_printk(dev, KERN_WARNING,
- "unexpected _GTF length (%d)\n",
- out_obj->buffer.length);
+ ata_dev_warn(dev, "unexpected _GTF length (%d)\n",
+ out_obj->buffer.length);
rc = -EINVAL;
goto out_free;
}
@@ -499,9 +492,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
if (gtf) {
*gtf = (void *)out_obj->buffer.pointer;
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: returning gtf=%p, gtf_count=%d\n",
- __func__, *gtf, rc);
+ ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n",
+ __func__, *gtf, rc);
}
return rc;
@@ -811,8 +803,8 @@ static int ata_acpi_push_id(struct ata_device *dev)
union acpi_object in_params[1];
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ix = %d, port#: %d\n",
- __func__, dev->devno, ap->port_no);
+ ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
+ __func__, dev->devno, ap->port_no);
/* Give the drive Identify data to the drive via the _SDD method */
/* _SDD: set up input parameters */
@@ -832,8 +824,7 @@ static int ata_acpi_push_id(struct ata_device *dev)
return -ENOENT;
if (ACPI_FAILURE(status)) {
- ata_dev_printk(dev, KERN_WARNING,
- "ACPI _SDD failed (AE 0x%x)\n", status);
+ ata_dev_warn(dev, "ACPI _SDD failed (AE 0x%x)\n", status);
return -EIO;
}
@@ -983,8 +974,8 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
if (nr_executed) {
rc = ata_dev_reread_id(dev, 0);
if (rc < 0) {
- ata_dev_printk(dev, KERN_ERR, "failed to IDENTIFY "
- "after ACPI commands\n");
+ ata_dev_err(dev,
+ "failed to IDENTIFY after ACPI commands\n");
return rc;
}
}
@@ -1002,8 +993,7 @@ int ata_acpi_on_devcfg(struct ata_device *dev)
return rc;
}
- ata_dev_printk(dev, KERN_WARNING,
- "ACPI: failed the second time, disabled\n");
+ ata_dev_warn(dev, "ACPI: failed the second time, disabled\n");
dev->acpi_handle = NULL;
/* We can safely continue if no _GTF command has been executed
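
The _GTF validation above follows from the method's return format: a flat buffer of consecutive 7-byte taskfile entries (REGS_PER_GTF), which is why a length not divisible by 7 is rejected and the entry count equals length / REGS_PER_GTF. A parsing sketch, assuming the struct layout libata-acpi.c uses:

	struct ata_acpi_gtf {
		u8 tf[7];	/* feature, nsect, lbal, lbam, lbah, device, command */
	} __packed;

	int gtf_count = out_obj->buffer.length / REGS_PER_GTF;
	struct ata_acpi_gtf *entries = (void *)out_obj->buffer.pointer;
	/* entries[0..gtf_count-1] are later replayed to the drive */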
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index aa5f055..4e9beff 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -335,8 +335,7 @@ void ata_force_cbl(struct ata_port *ap)
continue;
ap->cbl = fe->param.cbl;
- ata_port_printk(ap, KERN_NOTICE,
- "FORCE: cable set to %s\n", fe->param.name);
+ ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
return;
}
}
@@ -378,8 +377,7 @@ static void ata_force_link_limits(struct ata_link *link)
/* only honor the first spd limit */
if (!did_spd && fe->param.spd_limit) {
link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
- ata_link_printk(link, KERN_NOTICE,
- "FORCE: PHY spd limit set to %s\n",
+ ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
fe->param.name);
did_spd = true;
}
@@ -387,7 +385,7 @@ static void ata_force_link_limits(struct ata_link *link)
/* let lflags stack */
if (fe->param.lflags) {
link->flags |= fe->param.lflags;
- ata_link_printk(link, KERN_NOTICE,
+ ata_link_notice(link,
"FORCE: link flag 0x%x forced -> 0x%x\n",
fe->param.lflags, link->flags);
}
@@ -442,8 +440,8 @@ static void ata_force_xfermask(struct ata_device *dev)
dev->pio_mask = pio_mask;
}
- ata_dev_printk(dev, KERN_NOTICE,
- "FORCE: xfer_mask set to %s\n", fe->param.name);
+ ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
+ fe->param.name);
return;
}
}
@@ -486,8 +484,8 @@ static void ata_force_horkage(struct ata_device *dev)
dev->horkage |= fe->param.horkage_on;
dev->horkage &= ~fe->param.horkage_off;
- ata_dev_printk(dev, KERN_NOTICE,
- "FORCE: horkage modified (%s)\n", fe->param.name);
+ ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
+ fe->param.name);
}
}
@@ -711,8 +709,8 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
sect = tf->lbal;
if (!sect) {
- ata_dev_printk(dev, KERN_WARNING, "device reported "
- "invalid CHS sector 0\n");
+ ata_dev_warn(dev,
+ "device reported invalid CHS sector 0\n");
sect = 1; /* oh well */
}
@@ -1230,8 +1228,9 @@ static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "failed to read native "
- "max address (err_mask=0x%x)\n", err_mask);
+ ata_dev_warn(dev,
+ "failed to read native max address (err_mask=0x%x)\n",
+ err_mask);
if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
return -EACCES;
return -EIO;
@@ -1292,8 +1291,9 @@ static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "failed to set "
- "max address (err_mask=0x%x)\n", err_mask);
+ ata_dev_warn(dev,
+ "failed to set max address (err_mask=0x%x)\n",
+ err_mask);
if (err_mask == AC_ERR_DEV &&
(tf.feature & (ATA_ABORTED | ATA_IDNF)))
return -EACCES;
@@ -1336,8 +1336,8 @@ static int ata_hpa_resize(struct ata_device *dev)
* be unlocked, skip HPA resizing.
*/
if (rc == -EACCES || !unlock_hpa) {
- ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
- "broken, skipping HPA handling\n");
+ ata_dev_warn(dev,
+ "HPA support seems broken, skipping HPA handling\n");
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
/* we can continue if device aborted the command */
@@ -1355,14 +1355,13 @@ static int ata_hpa_resize(struct ata_device *dev)
return 0;
if (native_sectors > sectors)
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_info(dev,
"HPA detected: current %llu, native %llu\n",
(unsigned long long)sectors,
(unsigned long long)native_sectors);
else if (native_sectors < sectors)
- ata_dev_printk(dev, KERN_WARNING,
- "native sectors (%llu) is smaller than "
- "sectors (%llu)\n",
+ ata_dev_warn(dev,
+ "native sectors (%llu) is smaller than sectors (%llu)\n",
(unsigned long long)native_sectors,
(unsigned long long)sectors);
return 0;
@@ -1372,10 +1371,10 @@ static int ata_hpa_resize(struct ata_device *dev)
rc = ata_set_max_sectors(dev, native_sectors);
if (rc == -EACCES) {
/* if device aborted the command, skip HPA resizing */
- ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
- "(%llu -> %llu), skipping HPA handling\n",
- (unsigned long long)sectors,
- (unsigned long long)native_sectors);
+ ata_dev_warn(dev,
+ "device aborted resize (%llu -> %llu), skipping HPA handling\n",
+ (unsigned long long)sectors,
+ (unsigned long long)native_sectors);
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
return 0;
} else if (rc)
@@ -1384,14 +1383,14 @@ static int ata_hpa_resize(struct ata_device *dev)
/* re-read IDENTIFY data */
rc = ata_dev_reread_id(dev, 0);
if (rc) {
- ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
- "data after HPA resizing\n");
+ ata_dev_err(dev,
+ "failed to re-read IDENTIFY data after HPA resizing\n");
return rc;
}
if (print_info) {
u64 new_sectors = ata_id_n_sectors(dev->id);
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_info(dev,
"HPA unlocked: %llu -> %llu, native %llu\n",
(unsigned long long)sectors,
(unsigned long long)new_sectors,
@@ -1661,8 +1660,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
ata_qc_complete(qc);
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "qc timeout (cmd 0x%x)\n", command);
+ ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
+ command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -1876,7 +1875,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int rc;
if (ata_msg_ctl(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
+ ata_dev_dbg(dev, "%s: ENTER\n", __func__);
retry:
ata_tf_init(dev, &tf);
@@ -1915,14 +1914,13 @@ retry:
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
- ata_dev_printk(dev, KERN_DEBUG,
- "NODEV after polling detection\n");
+ ata_dev_dbg(dev, "NODEV after polling detection\n");
return -ENOENT;
}
if (is_semb) {
- ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on "
- "device w/ SEMB sig, disabled\n");
+ ata_dev_info(dev,
+ "IDENTIFY failed on device w/ SEMB sig, disabled\n");
/* SEMB is not supported yet */
*p_class = ATA_DEV_SEMB_UNSUP;
return 0;
@@ -1948,8 +1946,8 @@ retry:
* both flavors of IDENTIFYs which happens
* sometimes with phantom devices.
*/
- ata_dev_printk(dev, KERN_DEBUG,
- "both IDENTIFYs aborted, assuming NODEV\n");
+ ata_dev_dbg(dev,
+ "both IDENTIFYs aborted, assuming NODEV\n");
return -ENOENT;
}
@@ -1959,9 +1957,9 @@ retry:
}
if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
- ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, "
- "class=%d may_fallback=%d tried_spinup=%d\n",
- class, may_fallback, tried_spinup);
+ ata_dev_dbg(dev, "dumping IDENTIFY data, "
+ "class=%d may_fallback=%d tried_spinup=%d\n",
+ class, may_fallback, tried_spinup);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
}
@@ -2040,8 +2038,8 @@ retry:
err_out:
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
- "(%s, err_mask=0x%x)\n", reason, err_mask);
+ ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
+ reason, err_mask);
return rc;
}
@@ -2071,9 +2069,8 @@ static int ata_do_link_spd_horkage(struct ata_device *dev)
* guaranteed by setting sata_spd_limit to target_limit above.
*/
if (plink->sata_spd > target) {
- ata_dev_printk(dev, KERN_INFO,
- "applying link speed limit horkage to %s\n",
- sata_spd_string(target));
+ ata_dev_info(dev, "applying link speed limit horkage to %s\n",
+ sata_spd_string(target));
return -EAGAIN;
}
return 0;
@@ -2116,8 +2113,9 @@ static int ata_dev_config_ncq(struct ata_device *dev,
err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
SATA_FPDMA_AA);
if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to enable AA"
- "(error_mask=0x%x)\n", err_mask);
+ ata_dev_err(dev,
+ "failed to enable AA (error_mask=0x%x)\n",
+ err_mask);
if (err_mask != AC_ERR_DEV) {
dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
return -EIO;
@@ -2160,31 +2158,28 @@ int ata_dev_configure(struct ata_device *dev)
int rc;
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
- __func__);
+ ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
return 0;
}
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
+ ata_dev_dbg(dev, "%s: ENTER\n", __func__);
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
ata_force_horkage(dev);
if (dev->horkage & ATA_HORKAGE_DISABLE) {
- ata_dev_printk(dev, KERN_INFO,
- "unsupported device, disabling\n");
+ ata_dev_info(dev, "unsupported device, disabling\n");
ata_dev_disable(dev);
return 0;
}
if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
dev->class == ATA_DEV_ATAPI) {
- ata_dev_printk(dev, KERN_WARNING,
- "WARNING: ATAPI is %s, device ignored.\n",
- atapi_enabled ? "not supported with this driver"
- : "disabled");
+ ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
+ atapi_enabled ? "not supported with this driver"
+ : "disabled");
ata_dev_disable(dev);
return 0;
}
@@ -2193,6 +2188,16 @@ int ata_dev_configure(struct ata_device *dev)
if (rc)
return rc;
+ /* Some WD SATA-1 drives have issues with LPM; turn on NOLPM for them */
+ if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+ (id[76] & 0xe) == 0x2)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -2205,12 +2210,12 @@ int ata_dev_configure(struct ata_device *dev)
/* print device capabilities */
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
- "85:%04x 86:%04x 87:%04x 88:%04x\n",
- __func__,
- id[49], id[82], id[83], id[84],
- id[85], id[86], id[87], id[88]);
+ ata_dev_dbg(dev,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
+ __func__,
+ id[49], id[82], id[83], id[84],
+ id[85], id[86], id[87], id[88]);
/* initialize to-be-configured parameters */
dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -2244,17 +2249,15 @@ int ata_dev_configure(struct ata_device *dev)
if (ata_id_is_cfa(id)) {
/* CPRM may make this media unusable */
if (id[ATA_ID_CFA_KEY_MGMT] & 1)
- ata_dev_printk(dev, KERN_WARNING,
- "supports DRM functions and may "
- "not be fully accessible.\n");
+ ata_dev_warn(dev,
+ "supports DRM functions and may not be fully accessible\n");
snprintf(revbuf, 7, "CFA");
} else {
snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
/* Warn the user if the device has TPM extensions */
if (ata_id_has_tpm(id))
- ata_dev_printk(dev, KERN_WARNING,
- "supports DRM functions and may "
- "not be fully accessible.\n");
+ ata_dev_warn(dev,
+ "supports DRM functions and may not be fully accessible\n");
}
dev->n_sectors = ata_id_n_sectors(id);
@@ -2291,12 +2294,11 @@ int ata_dev_configure(struct ata_device *dev)
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info) {
- ata_dev_printk(dev, KERN_INFO,
- "%s: %s, %s, max %s\n",
- revbuf, modelbuf, fwrevbuf,
- ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_INFO,
- "%Lu sectors, multi %u: %s %s\n",
+ ata_dev_info(dev, "%s: %s, %s, max %s\n",
+ revbuf, modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask));
+ ata_dev_info(dev,
+ "%llu sectors, multi %u: %s %s\n",
(unsigned long long)dev->n_sectors,
dev->multi_count, lba_desc, ncq_desc);
}
@@ -2317,15 +2319,14 @@ int ata_dev_configure(struct ata_device *dev)
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info) {
- ata_dev_printk(dev, KERN_INFO,
- "%s: %s, %s, max %s\n",
- revbuf, modelbuf, fwrevbuf,
- ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_INFO,
- "%Lu sectors, multi %u, CHS %u/%u/%u\n",
- (unsigned long long)dev->n_sectors,
- dev->multi_count, dev->cylinders,
- dev->heads, dev->sectors);
+ ata_dev_info(dev, "%s: %s, %s, max %s\n",
+ revbuf, modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask));
+ ata_dev_info(dev,
+ "%llu sectors, multi %u, CHS %u/%u/%u\n",
+ (unsigned long long)dev->n_sectors,
+ dev->multi_count, dev->cylinders,
+ dev->heads, dev->sectors);
}
}
@@ -2342,8 +2343,7 @@ int ata_dev_configure(struct ata_device *dev)
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "unsupported CDB len\n");
+ ata_dev_warn(dev, "unsupported CDB len\n");
rc = -EINVAL;
goto err_out_nosup;
}
@@ -2364,9 +2364,9 @@ int ata_dev_configure(struct ata_device *dev)
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_AN);
if (err_mask)
- ata_dev_printk(dev, KERN_ERR,
- "failed to enable ATAPI AN "
- "(err_mask=0x%x)\n", err_mask);
+ ata_dev_err(dev,
+ "failed to enable ATAPI AN (err_mask=0x%x)\n",
+ err_mask);
else {
dev->flags |= ATA_DFLAG_AN;
atapi_an_string = ", ATAPI AN";
@@ -2385,12 +2385,12 @@ int ata_dev_configure(struct ata_device *dev)
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
- ata_dev_printk(dev, KERN_INFO,
- "ATAPI: %s, %s, max %s%s%s%s\n",
- modelbuf, fwrevbuf,
- ata_mode_string(xfer_mask),
- cdb_intr_string, atapi_an_string,
- dma_dir_string);
+ ata_dev_info(dev,
+ "ATAPI: %s, %s, max %s%s%s%s\n",
+ modelbuf, fwrevbuf,
+ ata_mode_string(xfer_mask),
+ cdb_intr_string, atapi_an_string,
+ dma_dir_string);
}
/* determine max_sectors */
@@ -2402,8 +2402,7 @@ int ata_dev_configure(struct ata_device *dev)
200 sectors */
if (ata_dev_knobble(dev)) {
if (ata_msg_drv(ap) && print_info)
- ata_dev_printk(dev, KERN_INFO,
- "applying bridge limits\n");
+ ata_dev_info(dev, "applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
dev->max_sectors = ATA_MAX_SECTORS;
}
@@ -2432,26 +2431,23 @@ int ata_dev_configure(struct ata_device *dev)
bugs */
if (print_info) {
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"Drive reports diagnostics failure. This may indicate a drive\n");
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"fault or invalid emulation. Contact drive vendor for information.\n");
}
}
if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
- ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires "
- "firmware update to be fully functional.\n");
- ata_dev_printk(dev, KERN_WARNING, " contact the vendor "
- "or visit http://ata.wiki.kernel.org.\n");
+ ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
+ ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
}
return 0;
err_out_nosup:
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: EXIT, err\n", __func__);
+ ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
return rc;
}
@@ -2673,13 +2669,11 @@ static void sata_print_link_status(struct ata_link *link)
if (ata_phys_link_online(link)) {
tmp = (sstatus >> 4) & 0xf;
- ata_link_printk(link, KERN_INFO,
- "SATA link up %s (SStatus %X SControl %X)\n",
- sata_spd_string(tmp), sstatus, scontrol);
+ ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
+ sata_spd_string(tmp), sstatus, scontrol);
} else {
- ata_link_printk(link, KERN_INFO,
- "SATA link down (SStatus %X SControl %X)\n",
- sstatus, scontrol);
+ ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
+ sstatus, scontrol);
}
}
@@ -2768,8 +2762,8 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
link->sata_spd_limit = mask;
- ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
- sata_spd_string(fls(mask)));
+ ata_link_warn(link, "limiting SATA link speed to %s\n",
+ sata_spd_string(fls(mask)));
return 0;
}
@@ -2964,7 +2958,7 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
memset(&p, 0, sizeof(p));
- if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
+ if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
if (speed <= XFER_PIO_2)
p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
else if ((speed <= XFER_PIO_4) ||
@@ -3146,8 +3140,7 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
snprintf(buf, sizeof(buf), "%s",
ata_mode_string(xfer_mask));
- ata_dev_printk(dev, KERN_WARNING,
- "limiting speed to %s\n", buf);
+ ata_dev_warn(dev, "limiting speed to %s\n", buf);
}
ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
@@ -3174,9 +3167,9 @@ static int ata_dev_set_mode(struct ata_device *dev)
dev_err_whine = " (SET_XFERMODE skipped)";
else {
if (nosetxfer)
- ata_dev_printk(dev, KERN_WARNING,
- "NOSETXFER but PATA detected - can't "
- "skip SETXFER, might malfunction\n");
+ ata_dev_warn(dev,
+ "NOSETXFER but PATA detected - can't "
+ "skip SETXFER, might malfunction\n");
err_mask = ata_dev_set_xfermode(dev);
}
@@ -3226,15 +3219,14 @@ static int ata_dev_set_mode(struct ata_device *dev)
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
dev->xfer_shift, (int)dev->xfer_mode);
- ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
- ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
- dev_err_whine);
+ ata_dev_info(dev, "configured for %s%s\n",
+ ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+ dev_err_whine);
return 0;
fail:
- ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
- "(err_mask=0x%x)\n", err_mask);
+ ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
return -EIO;
}
@@ -3296,7 +3288,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
/* step 2: always set host PIO timings */
ata_for_each_dev(dev, link, ENABLED) {
if (dev->pio_mode == 0xff) {
- ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
+ ata_dev_warn(dev, "no PIO support\n");
rc = -EINVAL;
goto out;
}
@@ -3414,7 +3406,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline,
if (!warned && time_after(now, start + 5 * HZ) &&
(deadline - now > 3 * HZ)) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"link is slow to respond, please be patient "
"(ready=%d)\n", tmp);
warned = 1;
@@ -3562,16 +3554,14 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
} while ((scontrol & 0xf0f) != 0x300 && --tries);
if ((scontrol & 0xf0f) != 0x300) {
- ata_link_printk(link, KERN_ERR,
- "failed to resume link (SControl %X)\n",
- scontrol);
+ ata_link_warn(link, "failed to resume link (SControl %X)\n",
+ scontrol);
return 0;
}
if (tries < ATA_LINK_RESUME_TRIES)
- ata_link_printk(link, KERN_WARNING,
- "link resume succeeded after %d retries\n",
- ATA_LINK_RESUME_TRIES - tries);
+ ata_link_warn(link, "link resume succeeded after %d retries\n",
+ ATA_LINK_RESUME_TRIES - tries);
if ((rc = sata_link_debounce(link, params, deadline)))
return rc;
@@ -3688,8 +3678,9 @@ int ata_std_prereset(struct ata_link *link, unsigned long deadline)
rc = sata_link_resume(link, timing, deadline);
/* whine about phy resume failure but proceed */
if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING, "failed to resume "
- "link for reset (errno=%d)\n", rc);
+ ata_link_warn(link,
+ "failed to resume link for reset (errno=%d)\n",
+ rc);
}
/* no point in trying softreset on offline link */
@@ -3805,8 +3796,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
/* online is set iff link is online && reset succeeded */
if (online)
*online = false;
- ata_link_printk(link, KERN_ERR,
- "COMRESET failed (errno=%d)\n", rc);
+ ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
}
DPRINTK("EXIT, rc=%d\n", rc);
return rc;
@@ -3890,8 +3880,8 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
if (dev->class != new_class) {
- ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
- dev->class, new_class);
+ ata_dev_info(dev, "class mismatch %d != %d\n",
+ dev->class, new_class);
return 0;
}
@@ -3901,14 +3891,14 @@ static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
if (strcmp(model[0], model[1])) {
- ata_dev_printk(dev, KERN_INFO, "model number mismatch "
- "'%s' != '%s'\n", model[0], model[1]);
+ ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
+ model[0], model[1]);
return 0;
}
if (strcmp(serial[0], serial[1])) {
- ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
- "'%s' != '%s'\n", serial[0], serial[1]);
+ ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
+ serial[0], serial[1]);
return 0;
}
@@ -3978,8 +3968,8 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
new_class != ATA_DEV_ATA &&
new_class != ATA_DEV_ATAPI &&
new_class != ATA_DEV_SEMB) {
- ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
- dev->class, new_class);
+ ata_dev_info(dev, "class mismatch %u != %u\n",
+ dev->class, new_class);
rc = -ENODEV;
goto fail;
}
@@ -4000,9 +3990,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
return 0;
/* n_sectors has changed */
- ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n",
- (unsigned long long)n_sectors,
- (unsigned long long)dev->n_sectors);
+ ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
+ (unsigned long long)n_sectors,
+ (unsigned long long)dev->n_sectors);
/*
* Something could have caused HPA to be unlocked
@@ -4011,9 +4001,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
*/
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
- ata_dev_printk(dev, KERN_WARNING,
- "new n_sectors matches native, probably "
- "late HPA unlock, n_sectors updated\n");
+ ata_dev_warn(dev,
+ "new n_sectors matches native, probably "
+ "late HPA unlock, n_sectors updated\n");
/* use the larger n_sectors */
return 0;
}
@@ -4027,9 +4017,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
if (dev->n_native_sectors == n_native_sectors &&
dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
!(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
- ata_dev_printk(dev, KERN_WARNING,
- "old n_sectors matches native, probably "
- "late HPA lock, will try to unlock HPA\n");
+ ata_dev_warn(dev,
+ "old n_sectors matches native, probably "
+ "late HPA lock, will try to unlock HPA\n");
/* try unlocking HPA */
dev->flags |= ATA_DFLAG_UNLOCK_HPA;
rc = -EIO;
@@ -4040,7 +4030,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
dev->n_native_sectors = n_native_sectors;
dev->n_sectors = n_sectors;
fail:
- ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
+ ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
return rc;
}
@@ -4087,6 +4077,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
+ { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/* Devices we expect to fail diagnostics */
@@ -4116,6 +4107,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
+ /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4159,6 +4155,26 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* devices that don't properly handle TRIM commands */
+ { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+ * is put into slumber mode. We don't have a full list of the
+ * affected devices. Disable LPM if the device matches one of the
+ * known prefixes and is SATA-1. As a side effect, LPM partial is
+ * lost too.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=57211
+ */
+ { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+
/* End Marker */
{ }
};
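
The new TRIM and WD LPM entries lean on the blacklist's glob semantics: model strings are shell-style patterns matched against the IDENTIFY model field, so "WDC WD800JD-*" covers every capacity and firmware suffix in that family. A lookup sketch (lookup_horkage() is invented; the argument order of libata's internal glob_match() may differ between versions):

	static unsigned long lookup_horkage(const char *model, const char *rev)
	{
		const struct ata_blacklist_entry *ad = ata_device_blacklist;

		for (; ad->model_num; ad++)
			if (glob_match(ad->model_num, model) &&
			    (!ad->model_rev || glob_match(ad->model_rev, rev)))
				return ad->horkage;
		return 0;
	}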
@@ -4370,15 +4386,15 @@ static void ata_dev_xfermask(struct ata_device *dev)
if (ata_dma_blacklisted(dev)) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- ata_dev_printk(dev, KERN_WARNING,
- "device is on DMA blacklist, disabling DMA\n");
+ ata_dev_warn(dev,
+ "device is on DMA blacklist, disabling DMA\n");
}
if ((host->flags & ATA_HOST_SIMPLEX) &&
host->simplex_claimed && host->simplex_claimed != ap) {
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
- ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
- "other device, disabling DMA\n");
+ ata_dev_warn(dev,
+ "simplex DMA is claimed by other device, disabling DMA\n");
}
if (ap->flags & ATA_FLAG_NO_IORDY)
@@ -4398,8 +4414,8 @@ static void ata_dev_xfermask(struct ata_device *dev)
if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
/* UDMA/44 or higher would be available */
if (cable_is_40wire(ap)) {
- ata_dev_printk(dev, KERN_WARNING,
- "limited to UDMA/33 due to 40-wire cable\n");
+ ata_dev_warn(dev,
+ "limited to UDMA/33 due to 40-wire cable\n");
xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
}
@@ -4446,7 +4462,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
else /* In the ancient relic department - skip all of this */
return 0;
- err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ /* On some disks, this command causes spin-up, so we need a longer timeout */
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
@@ -4699,6 +4716,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
+ * Some ATA host controllers may implement a queue depth less than
+ * ATA_MAX_QUEUE, so we must not allocate a tag beyond the hardware
+ * limit.
+ *
* LOCKING:
* None.
*/
@@ -4706,21 +4727,30 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
- unsigned int i;
+ unsigned int max_queue = ap->host->n_tags;
+ unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- /* the last tag is reserved for internal command. */
- for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
- if (!test_and_set_bit(i, &ap->qc_allocated)) {
- qc = __ata_qc_from_tag(ap, i);
+ for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+ if (ap->flags & ATA_FLAG_LOWTAG)
+ tag = i;
+ else
+ tag = tag < max_queue ? tag : 0;
+
+ /* the last tag is reserved for internal command. */
+ if (tag == ATA_TAG_INTERNAL)
+ continue;
+
+ if (!test_and_set_bit(tag, &ap->qc_allocated)) {
+ qc = __ata_qc_from_tag(ap, tag);
+ qc->tag = tag;
+ ap->last_tag = tag;
break;
}
-
- if (qc)
- qc->tag = i;
+ }
return qc;
}
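
The rewritten allocator walks the tag space circularly, starting just past the last tag handed out, skipping ATA_TAG_INTERNAL, and never exceeding the host's advertised depth. The wrap logic is equivalent to a modulo scan; a standalone sketch (next_tag() is hypothetical):

	/* with max_queue = 8, last_tag = 5: visits 6, 7, 0, 1, 2, 3, 4, 5 */
	static unsigned int next_tag(unsigned int last_tag, unsigned int i,
				     unsigned int max_queue)
	{
		return (last_tag + 1 + i) % max_queue;
	}

Spreading tags this way also avoids immediately reusing a just-freed tag, which some controllers and devices reportedly handle badly.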
@@ -4966,8 +4996,8 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
done_mask = ap->qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
- ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
- "(%08x->%08x)\n", ap->qc_active, qc_active);
+ ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
+ ap->qc_active, qc_active);
return -EINVAL;
}
@@ -5859,9 +5889,9 @@ int ata_host_start(struct ata_host *host)
rc = ap->ops->port_start(ap);
if (rc) {
if (rc != -ENODEV)
- dev_printk(KERN_ERR, host->dev,
- "failed to start port %d "
- "(errno=%d)\n", i, rc);
+ dev_err(host->dev,
+ "failed to start port %d (errno=%d)\n",
+ i, rc);
goto err_out;
}
}
@@ -5901,6 +5931,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
+ host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->flags = flags;
host->ops = ops;
@@ -5981,10 +6012,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
+ host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
- dev_printk(KERN_ERR, host->dev,
- "BUG: trying to register unstarted host\n");
+ dev_err(host->dev, "BUG: trying to register unstarted host\n");
WARN_ON(1);
return -EINVAL;
}
@@ -6035,14 +6067,13 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
ap->udma_mask);
if (!ata_port_is_dummy(ap)) {
- ata_port_printk(ap, KERN_INFO,
- "%cATA max %s %s\n",
- (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
- ata_mode_string(xfer_mask),
- ap->link.eh_info.desc);
+ ata_port_info(ap, "%cATA max %s %s\n",
+ (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
+ ata_mode_string(xfer_mask),
+ ap->link.eh_info.desc);
ata_ehi_clear_desc(&ap->link.eh_info);
} else
- ata_port_printk(ap, KERN_INFO, "DUMMY\n");
+ ata_port_info(ap, "DUMMY\n");
}
/* perform each probe asynchronously */
@@ -6254,8 +6285,8 @@ int ata_pci_device_do_resume(struct pci_dev *pdev)
rc = pcim_enable_device(pdev);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to enable device after resume (%d)\n", rc);
+ dev_err(&pdev->dev,
+ "failed to enable device after resume (%d)\n", rc);
return rc;
}
@@ -6349,6 +6380,7 @@ static int __init ata_parse_force_one(char **cur,
{ "nohrst", .lflags = ATA_LFLAG_NO_HRST },
{ "nosrst", .lflags = ATA_LFLAG_NO_SRST },
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
+ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
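
The new "disable" keyword slots into the existing libata.force grammar, giving a command-line escape hatch for devices that need ATA_HORKAGE_DISABLE (the ata_dev_configure() hunk earlier prints "unsupported device, disabling" and detaches them). For example, with an illustrative device ID, booting with:

	libata.force=1.00:disable

disables device 00 on port 1 without a kernel rebuild.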
@@ -6588,6 +6620,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
return tmp;
}
+/**
+ * sata_lpm_ignore_phy_events - test if PHY event should be ignored
+ * @link: Link receiving the event
+ *
+ * Test whether the received PHY event has to be ignored or not.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * True if the event has to be ignored.
+ */
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+ unsigned long lpm_timeout = link->last_lpm_change +
+ msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+ /* if LPM is enabled, PHYRDY doesn't mean anything */
+ if (link->lpm_policy > ATA_LPM_MAX_POWER)
+ return true;
+
+ /* ignore the first PHY event after the LPM policy changed
+ * as it might be spurious
+ */
+ if ((link->flags & ATA_LFLAG_CHANGED) &&
+ time_before(jiffies, lpm_timeout))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+
/*
* Dummy port_ops
*/
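
sata_lpm_ignore_phy_events() combines the old LPM-policy test with a debounce window: ATA_TMOUT_SPURIOUS_PHY is a millisecond value converted to jiffies, and time_before() keeps the comparison safe across jiffies wraparound. A worked sketch (the 10000 ms value and HZ = 250 are assumptions for illustration):

	/* assume ATA_TMOUT_SPURIOUS_PHY == 10000 ms; with HZ == 250
	 * the window below is 2500 jiffies */
	unsigned long deadline = link->last_lpm_change +
				 msecs_to_jiffies(10000);

	if ((link->flags & ATA_LFLAG_CHANGED) &&
	    time_before(jiffies, deadline))
		return true;	/* first event after policy change: ignore */

The producer side appears in the libata-eh.c hunk below, where ata_eh_set_lpm() stamps last_lpm_change and sets ATA_LFLAG_CHANGED.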
@@ -6612,6 +6676,82 @@ const struct ata_port_info ata_dummy_port_info = {
};
/*
+ * Utility print functions
+ */
+int ata_port_printk(const struct ata_port *ap, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = printk("%sata%u: %pV", level, ap->print_id, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(ata_port_printk);
+
+int ata_link_printk(const struct ata_link *link, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (sata_pmp_attached(link->ap) || link->ap->slave_link)
+ r = printk("%sata%u.%02u: %pV",
+ level, link->ap->print_id, link->pmp, &vaf);
+ else
+ r = printk("%sata%u: %pV",
+ level, link->ap->print_id, &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(ata_link_printk);
+
+int ata_dev_printk(const struct ata_device *dev, const char *level,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ r = printk("%sata%u.%02u: %pV",
+ level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
+ &vaf);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(ata_dev_printk);
+
+void ata_print_version(const struct device *dev, const char *version)
+{
+ dev_printk(KERN_DEBUG, dev, "version %s\n", version);
+}
+EXPORT_SYMBOL(ata_print_version);
+
+/*
* libata is essentially a library of internal helper functions for
* low-level ATA host controller drivers. As such, the API/ABI is
* likely to change as new drivers are added and updated.
@@ -6671,6 +6811,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
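
The three %pV-based helpers added above carry the port/link/device prefix formatting; the level-named calls used throughout this patch (ata_dev_err(), ata_link_warn(), ata_port_info(), ...) are presumably thin header-side macros over them, along these lines (a sketch of the likely include/linux/libata.h side):

	#define ata_dev_err(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
	#define ata_dev_warn(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
	#define ata_dev_info(dev, fmt, ...) \
		ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)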
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1cbb004..f54b0775 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/blkdev.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -782,8 +783,9 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
goto repeat;
}
- ata_port_printk(ap, KERN_ERR, "EH pending after %d "
- "tries, giving up\n", ATA_EH_MAX_TRIES);
+ ata_port_err(ap,
+ "EH pending after %d tries, giving up\n",
+ ATA_EH_MAX_TRIES);
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
}
@@ -816,7 +818,7 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
schedule_delayed_work(&ap->hotplug_task, 0);
if (ap->pflags & ATA_PFLAG_RECOVERED)
- ata_port_printk(ap, KERN_INFO, "EH complete\n");
+ ata_port_info(ap, "EH complete\n");
ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
@@ -1284,14 +1286,14 @@ void ata_eh_qc_complete(struct ata_queued_cmd *qc)
* should be retried. To be used from EH.
*
* SCSI midlayer limits the number of retries to scmd->allowed.
- * scmd->retries is decremented for commands which get retried
+ * scmd->allowed is incremented for commands which get retried
* due to unrelated failures (qc->err_mask is zero).
*/
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
- if (!qc->err_mask && scmd->retries)
- scmd->retries--;
+ if (!qc->err_mask)
+ scmd->allowed++;
__ata_eh_qc_complete(qc);
}
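
The flipped accounting matters because the SCSI midlayer (roughly) fails a command once scmd->retries reaches scmd->allowed. Growing the budget for retries that were not the device's fault preserves that gap, instead of silently burning midlayer retries as the old decrement did. A numeric sketch:

	/* allowed = 3; two EH retries with qc->err_mask == 0 */
	scmd->allowed++;	/* 4 */
	scmd->allowed++;	/* 5 */
	/* scmd->retries still counts only real device failures */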
@@ -1310,7 +1312,7 @@ void ata_dev_disable(struct ata_device *dev)
return;
if (ata_msg_drv(dev->link->ap))
- ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+ ata_dev_warn(dev, "disabled\n");
ata_acpi_on_disable(dev);
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
@@ -1515,8 +1517,8 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
for (i = 0; i < ATA_SECT_SIZE; i++)
csum += buf[i];
if (csum)
- ata_dev_printk(dev, KERN_WARNING,
- "invalid checksum 0x%x on log page 10h\n", csum);
+ ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
+ csum);
if (buf[0] & 0x80)
return -ENOENT;
@@ -1716,14 +1718,14 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
memset(&tf, 0, sizeof(tf));
rc = ata_eh_read_log_10h(dev, &tag, &tf);
if (rc) {
- ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
- "(errno=%d)\n", rc);
+ ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
+ rc);
return;
}
if (!(link->sactive & (1 << tag))) {
- ata_link_printk(link, KERN_ERR, "log page 10h reported "
- "inactive tag %d\n", tag);
+ ata_link_err(link, "log page 10h reported inactive tag %d\n",
+ tag);
return;
}
@@ -1988,8 +1990,7 @@ static unsigned int ata_eh_speed_down(struct ata_device *dev,
(dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
dev->flags |= ATA_DFLAG_NCQ_OFF;
- ata_dev_printk(dev, KERN_WARNING,
- "NCQ disabled due to excessive errors\n");
+ ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
goto done;
}
@@ -2374,24 +2375,24 @@ static void ata_eh_link_report(struct ata_link *link)
ap->eh_tries);
if (ehc->i.dev) {
- ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
- "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
- ehc->i.err_mask, link->sactive, ehc->i.serror,
- ehc->i.action, frozen, tries_buf);
+ ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
+ "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+ ehc->i.err_mask, link->sactive, ehc->i.serror,
+ ehc->i.action, frozen, tries_buf);
if (desc)
- ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
+ ata_dev_err(ehc->i.dev, "%s\n", desc);
} else {
- ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
- "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
- ehc->i.err_mask, link->sactive, ehc->i.serror,
- ehc->i.action, frozen, tries_buf);
+ ata_link_err(link, "exception Emask 0x%x "
+ "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
+ ehc->i.err_mask, link->sactive, ehc->i.serror,
+ ehc->i.action, frozen, tries_buf);
if (desc)
- ata_link_printk(link, KERN_ERR, "%s\n", desc);
+ ata_link_err(link, "%s\n", desc);
}
#ifdef CONFIG_ATA_VERBOSE_ERROR
if (ehc->i.serror)
- ata_link_printk(link, KERN_ERR,
+ ata_link_err(link,
"SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
@@ -2456,11 +2457,11 @@ static void ata_eh_link_report(struct ata_link *link)
} else {
const char *descr = ata_get_cmd_descript(cmd->command);
if (descr)
- ata_dev_printk(qc->dev, KERN_ERR,
- "failed command: %s\n", descr);
+ ata_dev_err(qc->dev, "failed command: %s\n",
+ descr);
}
- ata_dev_printk(qc->dev, KERN_ERR,
+ ata_dev_err(qc->dev,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
"tag %d%s\n %s"
"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2481,11 +2482,9 @@ static void ata_eh_link_report(struct ata_link *link)
if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
ATA_ERR)) {
if (res->command & ATA_BUSY)
- ata_dev_printk(qc->dev, KERN_ERR,
- "status: { Busy }\n");
+ ata_dev_err(qc->dev, "status: { Busy }\n");
else
- ata_dev_printk(qc->dev, KERN_ERR,
- "status: { %s%s%s%s}\n",
+ ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
res->command & ATA_DRDY ? "DRDY " : "",
res->command & ATA_DF ? "DF " : "",
res->command & ATA_DRQ ? "DRQ " : "",
@@ -2495,8 +2494,7 @@ static void ata_eh_link_report(struct ata_link *link)
if (cmd->command != ATA_CMD_PACKET &&
(res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
ATA_ABORTED)))
- ata_dev_printk(qc->dev, KERN_ERR,
- "error: { %s%s%s%s}\n",
+ ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
@@ -2535,8 +2533,7 @@ static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
return reset(link, classes, deadline);
}
-static int ata_eh_followup_srst_needed(struct ata_link *link,
- int rc, const unsigned int *classes)
+static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
return 0;
@@ -2651,8 +2648,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
if (rc) {
if (rc == -ENOENT) {
- ata_link_printk(link, KERN_DEBUG,
- "port disabled. ignoring.\n");
+ ata_link_dbg(link, "port disabled--ignoring\n");
ehc->i.action &= ~ATA_EH_RESET;
ata_for_each_dev(dev, link, ALL)
@@ -2660,8 +2656,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
rc = 0;
} else
- ata_link_printk(link, KERN_ERR,
- "prereset failed (errno=%d)\n", rc);
+ ata_link_err(link,
+ "prereset failed (errno=%d)\n",
+ rc);
goto out;
}
@@ -2690,8 +2687,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
if (reset) {
if (verbose)
- ata_link_printk(link, KERN_INFO, "%s resetting link\n",
- reset == softreset ? "soft" : "hard");
+ ata_link_info(link, "%s resetting link\n",
+ reset == softreset ? "soft" : "hard");
/* mark that this EH session started with reset */
ehc->last_reset = jiffies;
@@ -2711,8 +2708,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
int tmp;
if (verbose)
- ata_link_printk(slave, KERN_INFO,
- "hard resetting link\n");
+ ata_link_info(slave, "hard resetting link\n");
ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
tmp = ata_do_reset(slave, reset, classes, deadline,
@@ -2731,13 +2727,12 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* perform follow-up SRST if necessary */
if (reset == hardreset &&
- ata_eh_followup_srst_needed(link, rc, classes)) {
+ ata_eh_followup_srst_needed(link, rc)) {
reset = softreset;
if (!reset) {
- ata_link_printk(link, KERN_ERR,
- "follow-up softreset required "
- "but no softreset available\n");
+ ata_link_err(link,
+ "follow-up softreset required but no softreset available\n");
failed_link = link;
rc = -EINVAL;
goto fail;
@@ -2752,8 +2747,8 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
} else {
if (verbose)
- ata_link_printk(link, KERN_INFO, "no reset method "
- "available, skipping reset\n");
+ ata_link_info(link,
+ "no reset method available, skipping reset\n");
if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
lflags |= ATA_LFLAG_ASSUME_ATA;
}
@@ -2831,36 +2826,35 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_for_each_dev(dev, link, ALL) {
if (ata_phys_link_online(ata_dev_phys_link(dev))) {
if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
- ata_dev_printk(dev, KERN_DEBUG, "link online "
- "but device misclassifed\n");
+ ata_dev_dbg(dev, "link online but device misclassified\n");
classes[dev->devno] = ATA_DEV_NONE;
nr_unknown++;
}
} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
if (ata_class_enabled(classes[dev->devno]))
- ata_dev_printk(dev, KERN_DEBUG, "link offline, "
- "clearing class %d to NONE\n",
- classes[dev->devno]);
+ ata_dev_dbg(dev,
+ "link offline, clearing class %d to NONE\n",
+ classes[dev->devno]);
classes[dev->devno] = ATA_DEV_NONE;
} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
- ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
- "clearing UNKNOWN to NONE\n");
+ ata_dev_dbg(dev,
+ "link status unknown, clearing UNKNOWN to NONE\n");
classes[dev->devno] = ATA_DEV_NONE;
}
}
if (classify && nr_unknown) {
if (try < max_tries) {
- ata_link_printk(link, KERN_WARNING, "link online but "
- "%d devices misclassified, retrying\n",
- nr_unknown);
+ ata_link_warn(link,
+ "link online but %d devices misclassified, retrying\n",
+ nr_unknown);
failed_link = link;
rc = -EAGAIN;
goto fail;
}
- ata_link_printk(link, KERN_WARNING,
- "link online but %d devices misclassified, "
- "device detection might fail\n", nr_unknown);
+ ata_link_warn(link,
+ "link online but %d devices misclassified, "
+ "device detection might fail\n", nr_unknown);
}
/* reset successful, schedule revalidation */
@@ -2890,14 +2884,23 @@ int ata_eh_reset(struct ata_link *link, int classify,
sata_scr_read(link, SCR_STATUS, &sstatus))
rc = -ERESTART;
- if (rc == -ERESTART || try >= max_tries)
+ if (try >= max_tries) {
+ /*
+ * Thaw host port even if reset failed, so that the port
+ * can be retried on the next phy event. This risks
+ * repeated EH runs but seems to be a better tradeoff than
+ * shutting down a port after a botched hotplug attempt.
+ */
+ if (ata_is_host_link(link))
+ ata_eh_thaw_port(ap);
goto out;
+ }
now = jiffies;
if (time_before(now, deadline)) {
unsigned long delta = deadline - now;
- ata_link_printk(failed_link, KERN_WARNING,
+ ata_link_warn(failed_link,
"reset failed (errno=%d), retrying in %u secs\n",
rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
@@ -2907,6 +2910,16 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_eh_acquire(ap);
}
+ /*
+ * While disks spin up behind a PMP, some controllers fail to send
+ * SRST. They need to be reset - as well as the PMP - before retrying.
+ */
+ if (rc == -ERESTART) {
+ if (ata_is_host_link(link))
+ ata_eh_thaw_port(ap);
+ goto out;
+ }
+
if (try == max_tries - 1) {
sata_down_spd_limit(link, 0);
if (slave)
@@ -2988,7 +3001,7 @@ static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
tf.protocol |= ATA_PROT_NODATA;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
if (park && (err_mask || tf.lbal != 0xc4)) {
- ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
+ ata_dev_err(dev, "head unload failed!\n");
ehc->unloaded_mask &= ~(1 << dev->devno);
}
}
@@ -3199,8 +3212,9 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
err_mask = atapi_eh_tur(dev, &sense_key);
if (err_mask != 0 && err_mask != AC_ERR_DEV) {
- ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
- "failed (err_mask=0x%x)\n", err_mask);
+ ata_dev_warn(dev,
+ "TEST_UNIT_READY failed (err_mask=0x%x)\n",
+ err_mask);
return -EIO;
}
@@ -3209,14 +3223,14 @@ static int atapi_eh_clear_ua(struct ata_device *dev)
err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
if (err_mask) {
- ata_dev_printk(dev, KERN_WARNING, "failed to clear "
+ ata_dev_warn(dev, "failed to clear "
"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
return -EIO;
}
}
- ata_dev_printk(dev, KERN_WARNING,
- "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
+ ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
+ ATA_EH_UA_TRIES);
return 0;
}
@@ -3267,7 +3281,7 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
tf.flags |= ATA_TFLAG_DEVICE;
tf.protocol = ATA_PROT_NODATA;
- ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
+ ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
tf.command, qc->err_mask);
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
@@ -3282,7 +3296,7 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev)
*/
qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
} else {
- ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
+ ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
err_mask);
rc = -EIO;
@@ -3356,9 +3370,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_DISABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_printk(dev, KERN_WARNING,
- "failed to disable DIPM, Emask 0x%x\n",
- err_mask);
+ ata_dev_warn(dev,
+ "failed to disable DIPM, Emask 0x%x\n",
+ err_mask);
rc = -EIO;
goto fail;
}
@@ -3400,7 +3414,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
err_mask = ata_dev_set_feature(dev,
SETFEATURES_SATA_ENABLE, SATA_DIPM);
if (err_mask && err_mask != AC_ERR_DEV) {
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"failed to enable DIPM, Emask 0x%x\n",
err_mask);
rc = -EIO;
@@ -3409,6 +3423,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
}
}
+ link->last_lpm_change = jiffies;
+ link->flags |= ATA_LFLAG_CHANGED;
+
return 0;
fail:
@@ -3419,8 +3436,7 @@ fail:
/* if no device or only one more chance is left, disable LPM */
if (!dev || ehc->tries[dev->devno] <= 2) {
- ata_link_printk(link, KERN_WARNING,
- "disabling LPM on the link\n");
+ ata_link_warn(link, "disabling LPM on the link\n");
link->flags |= ATA_LFLAG_NO_LPM;
}
if (r_failed_dev)
@@ -3692,8 +3708,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
rc = ata_eh_reset(link, ata_link_nr_vacant(link),
prereset, softreset, hardreset, postreset);
if (rc) {
- ata_link_printk(link, KERN_ERR,
- "reset failed, giving up\n");
+ ata_link_err(link, "reset failed, giving up\n");
goto out;
}
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index cf9dc09..93ea335 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -8,6 +8,7 @@
*/
#include <linux/kernel.h>
+#include <linux/export.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include "libata.h"
@@ -147,8 +148,8 @@ int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *r_val)
err_mask = sata_pmp_read(link, reg, r_val);
if (err_mask) {
- ata_link_printk(link, KERN_WARNING, "failed to read SCR %d "
- "(Emask=0x%x)\n", reg, err_mask);
+ ata_link_warn(link, "failed to read SCR %d (Emask=0x%x)\n",
+ reg, err_mask);
return -EIO;
}
return 0;
@@ -178,8 +179,8 @@ int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val)
err_mask = sata_pmp_write(link, reg, val);
if (err_mask) {
- ata_link_printk(link, KERN_WARNING, "failed to write SCR %d "
- "(Emask=0x%x)\n", reg, err_mask);
+ ata_link_warn(link, "failed to write SCR %d (Emask=0x%x)\n",
+ reg, err_mask);
return -EIO;
}
return 0;
@@ -231,8 +232,8 @@ static int sata_pmp_read_gscr(struct ata_device *dev, u32 *gscr)
err_mask = sata_pmp_read(dev->link, reg, &gscr[reg]);
if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to read PMP "
- "GSCR[%d] (Emask=0x%x)\n", reg, err_mask);
+ ata_dev_err(dev, "failed to read PMP GSCR[%d] (Emask=0x%x)\n",
+ reg, err_mask);
return -EIO;
}
}
@@ -311,26 +312,25 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
}
if (print_info) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier %s, "
- "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
- sata_pmp_spec_rev_str(gscr), vendor, devid,
- sata_pmp_gscr_rev(gscr),
- nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
- gscr[SATA_PMP_GSCR_FEAT]);
+ ata_dev_info(dev, "Port Multiplier %s, "
+ "0x%04x:0x%04x r%d, %d ports, feat 0x%x/0x%x\n",
+ sata_pmp_spec_rev_str(gscr), vendor, devid,
+ sata_pmp_gscr_rev(gscr),
+ nr_ports, gscr[SATA_PMP_GSCR_FEAT_EN],
+ gscr[SATA_PMP_GSCR_FEAT]);
if (!(dev->flags & ATA_DFLAG_AN))
- ata_dev_printk(dev, KERN_INFO,
+ ata_dev_info(dev,
"Asynchronous notification not supported, "
- "hotplug won't\n work on fan-out "
- "ports. Use warm-plug instead.\n");
+ "hotplug won't work on fan-out ports. Use warm-plug instead.\n");
}
return 0;
fail:
- ata_dev_printk(dev, KERN_ERR,
- "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
- reason, err_mask);
+ ata_dev_err(dev,
+ "failed to configure Port Multiplier (%s, Emask=0x%x)\n",
+ reason, err_mask);
return rc;
}
@@ -389,8 +389,9 @@ static void sata_pmp_quirks(struct ata_port *ap)
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
- /* Class code report is unreliable and SRST
- * times out under certain configurations.
+ /*
+ * Class code report is unreliable and SRST times
+ * out under certain configurations.
*/
if (link->pmp < 5)
link->flags |= ATA_LFLAG_NO_SRST |
@@ -402,20 +403,17 @@ static void sata_pmp_quirks(struct ata_port *ap)
ATA_LFLAG_ASSUME_SEMB;
}
} else if (vendor == 0x1095 && devid == 0x4723) {
- /* sil4723 quirks */
- ata_for_each_link(link, ap, EDGE) {
- /* link reports offline after LPM */
- link->flags |= ATA_LFLAG_NO_LPM;
-
- /* class code report is unreliable */
- if (link->pmp < 2)
- link->flags |= ATA_LFLAG_ASSUME_ATA;
-
- /* the config device at port 2 locks up on SRST */
- if (link->pmp == 2)
- link->flags |= ATA_LFLAG_NO_SRST |
- ATA_LFLAG_ASSUME_ATA;
- }
+ /*
+ * sil4723 quirks
+ *
+ * Link reports offline after LPM. Class code report is
+ * unreliable. SIMG PMPs never got SRST reliable and the
+ * config device at port 2 locks up on SRST.
+ */
+ ata_for_each_link(link, ap, EDGE)
+ link->flags |= ATA_LFLAG_NO_LPM |
+ ATA_LFLAG_NO_SRST |
+ ATA_LFLAG_ASSUME_ATA;
} else if (vendor == 0x1095 && devid == 0x4726) {
/* sil4726 quirks */
ata_for_each_link(link, ap, EDGE) {
@@ -449,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
- } else if (vendor == 0x197b && devid == 0x2352) {
- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+ /*
+ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+ * 0x0325: jmicron JMB394.
+ */
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems
@@ -459,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
ATA_LFLAG_NO_SRST |
ATA_LFLAG_ASSUME_ATA;
}
+ } else if (vendor == 0x11ab && devid == 0x4140) {
+ /* Marvell 4140 quirks */
+ ata_for_each_link(link, ap, EDGE) {
+ /* port 4 is for SEMB device and it doesn't like SRST */
+ if (link->pmp == 4)
+ link->flags |= ATA_LFLAG_DISABLED;
+ }
}
}
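/*
 * Editor's sketch (not from the patch): the quirk logic above is an if/else
 * chain keyed on (vendor, devid). A table-driven equivalent -- not how the
 * driver actually does it -- would look like this; the flag values are
 * illustrative only.
 */
#include <stdio.h>

#define LFLAG_NO_LPM		0x1
#define LFLAG_NO_SRST		0x2
#define LFLAG_ASSUME_ATA	0x4

struct pmp_quirk {
	unsigned short vendor, devid;
	unsigned int link_flags;
};

static const struct pmp_quirk quirks[] = {
	{ 0x1095, 0x4723, LFLAG_NO_LPM | LFLAG_NO_SRST | LFLAG_ASSUME_ATA },
	{ 0x197b, 0x2352, LFLAG_NO_LPM | LFLAG_NO_SRST | LFLAG_ASSUME_ATA },
	{ 0x197b, 0x0325, LFLAG_NO_LPM | LFLAG_NO_SRST | LFLAG_ASSUME_ATA },
	{ 0, 0, 0 }	/* sentinel */
};

int main(void)
{
	unsigned short vendor = 0x197b, devid = 0x0325;

	for (const struct pmp_quirk *q = quirks; q->vendor; q++)
		if (q->vendor == vendor && q->devid == devid)
			printf("apply link flags 0x%x\n", q->link_flags);
	return 0;
}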
@@ -485,20 +493,17 @@ int sata_pmp_attach(struct ata_device *dev)
/* is it hanging off the right place? */
if (!sata_pmp_supported(ap)) {
- ata_dev_printk(dev, KERN_ERR,
- "host does not support Port Multiplier\n");
+ ata_dev_err(dev, "host does not support Port Multiplier\n");
return -EINVAL;
}
if (!ata_is_host_link(link)) {
- ata_dev_printk(dev, KERN_ERR,
- "Port Multipliers cannot be nested\n");
+ ata_dev_err(dev, "Port Multipliers cannot be nested\n");
return -EINVAL;
}
if (dev->devno) {
- ata_dev_printk(dev, KERN_ERR,
- "Port Multiplier must be the first device\n");
+ ata_dev_err(dev, "Port Multiplier must be the first device\n");
return -EINVAL;
}
@@ -517,8 +522,7 @@ int sata_pmp_attach(struct ata_device *dev)
rc = sata_pmp_init_links(ap, sata_pmp_gscr_ports(dev->gscr));
if (rc) {
- ata_dev_printk(dev, KERN_INFO,
- "failed to initialize PMP links\n");
+ ata_dev_info(dev, "failed to initialize PMP links\n");
goto fail;
}
@@ -562,7 +566,7 @@ static void sata_pmp_detach(struct ata_device *dev)
struct ata_link *tlink;
unsigned long flags;
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier detaching\n");
+ ata_dev_info(dev, "Port Multiplier detaching\n");
WARN_ON(!ata_is_host_link(link) || dev->devno ||
link->pmp != SATA_PMP_CTRL_PORT);
@@ -609,23 +613,23 @@ static int sata_pmp_same_pmp(struct ata_device *dev, const u32 *new_gscr)
new_nr_ports = sata_pmp_gscr_ports(new_gscr);
if (old_vendor != new_vendor) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
- "vendor mismatch '0x%x' != '0x%x'\n",
- old_vendor, new_vendor);
+ ata_dev_info(dev,
+ "Port Multiplier vendor mismatch '0x%x' != '0x%x'\n",
+ old_vendor, new_vendor);
return 0;
}
if (old_devid != new_devid) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
- "device ID mismatch '0x%x' != '0x%x'\n",
- old_devid, new_devid);
+ ata_dev_info(dev,
+ "Port Multiplier device ID mismatch '0x%x' != '0x%x'\n",
+ old_devid, new_devid);
return 0;
}
if (old_nr_ports != new_nr_ports) {
- ata_dev_printk(dev, KERN_INFO, "Port Multiplier "
- "nr_ports mismatch '0x%x' != '0x%x'\n",
- old_nr_ports, new_nr_ports);
+ ata_dev_info(dev,
+ "Port Multiplier nr_ports mismatch '0x%x' != '0x%x'\n",
+ old_nr_ports, new_nr_ports);
return 0;
}
@@ -691,8 +695,7 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
return 0;
fail:
- ata_dev_printk(dev, KERN_ERR,
- "PMP revalidation failed (errno=%d)\n", rc);
+ ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
@@ -716,13 +719,14 @@ static int sata_pmp_revalidate_quick(struct ata_device *dev)
err_mask = sata_pmp_read(dev->link, SATA_PMP_GSCR_PROD_ID, &prod_id);
if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to read PMP product ID "
- "(Emask=0x%x)\n", err_mask);
+ ata_dev_err(dev,
+ "failed to read PMP product ID (Emask=0x%x)\n",
+ err_mask);
return -EIO;
}
if (prod_id != dev->gscr[SATA_PMP_GSCR_PROD_ID]) {
- ata_dev_printk(dev, KERN_ERR, "PMP product ID mismatch\n");
+ ata_dev_err(dev, "PMP product ID mismatch\n");
/* something weird is going on, request full PMP recovery */
return -EIO;
}
@@ -777,8 +781,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
rc = ata_eh_reset(link, 0, prereset, softreset, hardreset,
postreset);
if (rc) {
- ata_link_printk(link, KERN_ERR,
- "failed to reset PMP, giving up\n");
+ ata_link_err(link, "failed to reset PMP, giving up\n");
goto fail;
}
@@ -819,9 +822,9 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
ehc->i.action |= ATA_EH_RESET;
goto retry;
} else {
- ata_dev_printk(dev, KERN_ERR, "failed to recover PMP "
- "after %d tries, giving up\n",
- ATA_EH_PMP_TRIES);
+ ata_dev_err(dev,
+ "failed to recover PMP after %d tries, giving up\n",
+ ATA_EH_PMP_TRIES);
goto fail;
}
}
@@ -867,8 +870,9 @@ static int sata_pmp_eh_handle_disabled_links(struct ata_port *ap)
/* unconditionally clear SError.N */
rc = sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
if (rc) {
- ata_link_printk(link, KERN_ERR, "failed to clear "
- "SError.N (errno=%d)\n", rc);
+ ata_link_err(link,
+ "failed to clear SError.N (errno=%d)\n",
+ rc);
return rc;
}
@@ -890,7 +894,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
/* disable this link */
if (!(link->flags & ATA_LFLAG_DISABLED)) {
- ata_link_printk(link, KERN_WARNING,
+ ata_link_warn(link,
"failed to recover link after %d tries, disabling\n",
ATA_EH_PMP_LINK_TRIES);
@@ -974,7 +978,7 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
gscr[SATA_PMP_GSCR_FEAT_EN]);
if (err_mask) {
- ata_link_printk(pmp_link, KERN_WARNING,
+ ata_link_warn(pmp_link,
"failed to disable NOTIFY (err_mask=0x%x)\n",
err_mask);
goto pmp_fail;
@@ -1018,8 +1022,9 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
err_mask = sata_pmp_write(pmp_link, SATA_PMP_GSCR_FEAT_EN,
gscr[SATA_PMP_GSCR_FEAT_EN]);
if (err_mask) {
- ata_dev_printk(pmp_dev, KERN_ERR, "failed to write "
- "PMP_FEAT_EN (Emask=0x%x)\n", err_mask);
+ ata_dev_err(pmp_dev,
+ "failed to write PMP_FEAT_EN (Emask=0x%x)\n",
+ err_mask);
rc = -EIO;
goto pmp_fail;
}
@@ -1028,8 +1033,9 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
/* check GSCR_ERROR */
err_mask = sata_pmp_read(pmp_link, SATA_PMP_GSCR_ERROR, &gscr_error);
if (err_mask) {
- ata_dev_printk(pmp_dev, KERN_ERR, "failed to read "
- "PMP_GSCR_ERROR (Emask=0x%x)\n", err_mask);
+ ata_dev_err(pmp_dev,
+ "failed to read PMP_GSCR_ERROR (Emask=0x%x)\n",
+ err_mask);
rc = -EIO;
goto pmp_fail;
}
@@ -1043,17 +1049,16 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
ata_ehi_hotplugged(&link->eh_context.i);
cnt++;
} else {
- ata_link_printk(link, KERN_WARNING,
- "PHY status changed but maxed out on retries, "
- "giving up\n");
- ata_link_printk(link, KERN_WARNING,
- "Manully issue scan to resume this link\n");
+ ata_link_warn(link,
+ "PHY status changed but maxed out on retries, giving up\n");
+ ata_link_warn(link,
+ "Manually issue scan to resume this link\n");
}
}
if (cnt) {
- ata_port_printk(ap, KERN_INFO, "PMP SError.N set for some "
- "ports, repeating recovery\n");
+ ata_port_info(ap,
+ "PMP SError.N set for some ports, repeating recovery\n");
goto retry;
}
@@ -1081,9 +1086,8 @@ static int sata_pmp_eh_recover(struct ata_port *ap)
goto retry;
}
- ata_port_printk(ap, KERN_ERR,
- "failed to recover PMP after %d tries, giving up\n",
- ATA_EH_PMP_TRIES);
+ ata_port_err(ap, "failed to recover PMP after %d tries, giving up\n",
+ ATA_EH_PMP_TRIES);
sata_pmp_detach(pmp_dev);
ata_dev_disable(pmp_dev);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3b42a5d..0ac7a5e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
+#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
@@ -110,12 +111,14 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
-static ssize_t ata_scsi_lpm_store(struct device *dev,
+static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(dev);
+ struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
+ struct ata_link *link;
+ struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -131,10 +134,20 @@ static ssize_t ata_scsi_lpm_store(struct device *dev,
return -EINVAL;
spin_lock_irqsave(ap->lock, flags);
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+ }
+
ap->target_lpm_policy = policy;
ata_port_schedule_eh(ap);
+out_unlock:
spin_unlock_irqrestore(ap->lock, flags);
-
return count;
}
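/*
 * Editor's sketch (not from the patch): the store hook above refuses a new
 * LPM policy when any enabled device carries the NOLPM horkage, and it does
 * the scan under the port lock. A minimal userspace model, with a pthread
 * mutex standing in for ap->lock; all names are invented.
 */
#include <stdio.h>
#include <pthread.h>

#define HORKAGE_NOLPM 0x1

struct dev { unsigned int horkage; int enabled; };

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev devs[] = { { 0, 1 }, { HORKAGE_NOLPM, 1 } };

static int set_lpm_policy(int policy)
{
	int rc = 0;

	pthread_mutex_lock(&port_lock);
	for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++) {
		if (devs[i].enabled && (devs[i].horkage & HORKAGE_NOLPM)) {
			rc = -95; /* -EOPNOTSUPP */
			goto out_unlock;
		}
	}
	/* all devices fine: commit the policy and kick EH here */
	(void)policy;
out_unlock:
	pthread_mutex_unlock(&port_lock);
	return rc;
}

int main(void)
{
	printf("set_lpm_policy -> %d\n", set_lpm_policy(3));
	return 0;
}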
@@ -1110,8 +1123,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
/* configure draining */
buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
if (!buf) {
- ata_dev_printk(dev, KERN_ERR,
- "drain buffer allocation failed\n");
+ ata_dev_err(dev, "drain buffer allocation failed\n");
return -ENOMEM;
}
@@ -1129,7 +1141,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
* IDENTIFY_PACKET is executed as ATA_PROT_PIO.
*/
if (sdev->sector_size > PAGE_SIZE)
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_warn(dev,
"sector_size=%u > PAGE_SIZE, PIO may malfunction\n",
sdev->sector_size);
@@ -1218,25 +1230,19 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
}
/**
- * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
+ * @ap: ATA port of the device whose queue depth is being changed
* @sdev: SCSI device to configure queue depth for
* @queue_depth: new queue depth
* @reason: calling context
*
- * This is libata standard hostt->change_queue_depth callback.
- * SCSI will call into this callback when user tries to set queue
- * depth via sysfs.
- *
- * LOCKING:
- * SCSI layer (we don't care)
+ * libsas and libata have different approaches for associating an sdev with
+ * its ata_port.
*
- * RETURNS:
- * Newly configured queue depth.
*/
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
- int reason)
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+ int queue_depth, int reason)
{
- struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev;
unsigned long flags;
@@ -1272,6 +1278,30 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
}
/**
+ * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * @sdev: SCSI device to configure queue depth for
+ * @queue_depth: new queue depth
+ * @reason: calling context
+ *
+ * This is libata standard hostt->change_queue_depth callback.
+ * SCSI will call into this callback when user tries to set queue
+ * depth via sysfs.
+ *
+ * LOCKING:
+ * SCSI layer (we don't care)
+ *
+ * RETURNS:
+ * Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+ int reason)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+ return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
+}
+
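/*
 * Editor's sketch (not from the patch): the split above follows the common
 * "core helper + thin wrapper" pattern, so libsas can resolve the ata_port
 * its own way and still share the core logic. All names below are invented
 * for illustration.
 */
#include <stdio.h>

struct port { int id; };

/* shared core: caller has already resolved the port */
static int core_change_queue_depth(struct port *p, int depth)
{
	printf("port %d: queue depth -> %d\n", p->id, depth);
	return depth;
}

/* libata-style wrapper: derives the port from its own host association */
static int scsi_change_queue_depth(struct port *host_port, int depth)
{
	return core_change_queue_depth(host_port, depth);
}

int main(void)
{
	struct port p = { .id = 0 };

	return scsi_change_queue_depth(&p, 31) == 31 ? 0 : 1;
}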
+/**
* ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
* @qc: Storage for translated ATA taskfile
*
@@ -1786,8 +1816,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
cmd->sc_data_direction == DMA_TO_DEVICE) {
if (unlikely(scsi_bufflen(cmd) < 1)) {
- ata_dev_printk(dev, KERN_WARNING,
- "WARNING: zero len r/w req\n");
+ ata_dev_warn(dev, "WARNING: zero len r/w req\n");
goto err_did;
}
@@ -2444,7 +2473,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[14] = (lowest_aligned >> 8) & 0x3f;
rbuf[15] = lowest_aligned;
- if (ata_id_has_trim(args->id)) {
+ if (ata_id_has_trim(args->id) &&
+ !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
rbuf[14] |= 0x80; /* TPE */
if (ata_id_has_zero_after_trim(args->id))
@@ -2971,9 +3001,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* with the cached multi_count of libata
*/
if (multi_count != dev->multi_count)
- ata_dev_printk(dev, KERN_WARNING,
- "invalid multi_count %u ignored\n",
- multi_count);
+ ata_dev_warn(dev, "invalid multi_count %u ignored\n",
+ multi_count);
}
/*
@@ -3468,9 +3497,8 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
goto repeat;
}
- ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan "
- "failed without making any progress,\n"
- " switching to async\n");
+ ata_port_err(ap,
+ "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n");
}
queue_delayed_work(system_long_wq, &ap->hotplug_task,
@@ -3552,8 +3580,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
mutex_unlock(&ap->scsi_host->scan_mutex);
if (sdev) {
- ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
- dev_name(&sdev->sdev_gendev));
+ ata_dev_info(dev, "detaching (SCSI %s)\n",
+ dev_name(&sdev->sdev_gendev));
scsi_remove_device(sdev);
scsi_device_put(sdev);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b1b926c..22edc92 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
@@ -227,9 +228,9 @@ int ata_sff_busy_sleep(struct ata_port *ap,
}
if (status != 0xff && (status & ATA_BUSY))
- ata_port_printk(ap, KERN_WARNING,
- "port is slow to respond, please be patient "
- "(Status 0x%x)\n", status);
+ ata_port_warn(ap,
+ "port is slow to respond, please be patient (Status 0x%x)\n",
+ status);
timeout = ata_deadline(timer_start, tmout);
while (status != 0xff && (status & ATA_BUSY) &&
@@ -242,9 +243,9 @@ int ata_sff_busy_sleep(struct ata_port *ap,
return -ENODEV;
if (status & ATA_BUSY) {
- ata_port_printk(ap, KERN_ERR, "port failed to respond "
- "(%lu secs, Status 0x%x)\n",
- DIV_ROUND_UP(tmout, 1000), status);
+ ata_port_err(ap,
+ "port failed to respond (%lu secs, Status 0x%x)\n",
+ DIV_ROUND_UP(tmout, 1000), status);
return -EBUSY;
}
@@ -350,8 +351,8 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
if (ata_msg_probe(ap))
- ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
- "device %u, wait %u\n", device, wait);
+ ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
+ device, wait);
if (wait)
ata_wait_idle(ap);
@@ -569,7 +570,7 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
/* Transfer trailing byte, if any. */
if (unlikely(buflen & 0x01)) {
- unsigned char pad[2];
+ unsigned char pad[2] = { };
/* Point buf to the tail of buffer */
buf += buflen - 1;
@@ -628,7 +629,7 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
/* Transfer trailing bytes, if any */
if (unlikely(slop)) {
- unsigned char pad[4];
+ unsigned char pad[4] = { };
/* Point buf to the tail of buffer */
buf += buflen - slop;
@@ -678,7 +679,7 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
unsigned int consumed;
local_irq_save(flags);
- consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
+ consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
local_irq_restore(flags);
return consumed;
@@ -1332,10 +1333,23 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
DPRINTK("ENTER\n");
cancel_delayed_work_sync(&ap->sff_pio_task);
+
+ /*
+ * We want to reset the HSM state to IDLE. If we do so without
+ * grabbing the port lock, critical sections protected by it which
+ * expect the HSM state to stay stable may get surprised. For
+ * example, we may set IDLE in between the time
+ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+ */
+ spin_lock_irq(ap->lock);
ap->hsm_task_state = HSM_ST_IDLE;
+ spin_unlock_irq(ap->lock);
+
+ ap->sff_pio_task_link = NULL;
if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
+ ata_port_dbg(ap, "%s: EXIT\n", __func__);
}
static void ata_sff_pio_task(struct work_struct *work)
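/*
 * Editor's sketch of the race the comment above describes: the HSM state
 * must only change while holding the same lock its readers hold, otherwise
 * a reader can see IDLE appear between its check and its next step. A
 * userspace model with a pthread mutex standing in for ap->lock; all names
 * are invented.
 */
#include <pthread.h>
#include <stdio.h>

enum hsm { HSM_IDLE, HSM_BUSY };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum hsm state = HSM_BUSY;

static void flush_task(void)
{
	/* reset to IDLE only while holding the lock */
	pthread_mutex_lock(&lock);
	state = HSM_IDLE;
	pthread_mutex_unlock(&lock);
}

static void irq_path(void)
{
	pthread_mutex_lock(&lock);
	if (state != HSM_IDLE)
		printf("advance state machine\n"); /* state can't flip mid-check */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	irq_path();
	flush_task();
	irq_path();
	return 0;
}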
@@ -1513,7 +1527,7 @@ static unsigned int ata_sff_idle_irq(struct ata_port *ap)
ap->ops->sff_check_status(ap);
if (ap->ops->sff_irq_clear)
ap->ops->sff_irq_clear(ap);
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ ata_port_warn(ap, "irq trap\n");
return 1;
}
#endif
@@ -1711,7 +1725,7 @@ void ata_sff_lost_interrupt(struct ata_port *ap)
/* There was a command running, we are no longer busy and we have
no interrupt. */
- ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
+ ata_port_warn(ap, "lost interrupt (Status 0x%x)\n",
status);
/* Run the host interrupt logic as if the interrupt had not been
lost */
@@ -1798,8 +1812,9 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
if (!ata_link_offline(link)) {
rc = ata_sff_wait_ready(link, deadline);
if (rc && rc != -ENODEV) {
- ata_link_printk(link, KERN_WARNING, "device not ready "
- "(errno=%d), forcing hardreset\n", rc);
+ ata_link_warn(link,
+ "device not ready (errno=%d), forcing hardreset\n",
+ rc);
ehc->i.action |= ATA_EH_HARDRESET;
}
}
@@ -2005,13 +2020,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
- /* software reset. causes dev0 to be selected */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- udelay(20); /* FIXME: flush */
- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
- udelay(20); /* FIXME: flush */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- ap->last_ctl = ap->ctl;
+ if (ap->ioaddr.ctl_addr) {
+ /* software reset. causes dev0 to be selected */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ udelay(20); /* FIXME: flush */
+ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20); /* FIXME: flush */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = ap->ctl;
+ }
/* wait the port to become ready */
return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
@@ -2056,7 +2073,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
rc = ata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -2170,8 +2187,7 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
/* Can become DEBUG later */
if (count)
- ata_port_printk(ap, KERN_DEBUG,
- "drained %d bytes to clear DRQ.\n", count);
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
@@ -2213,10 +2229,6 @@ void ata_sff_error_handler(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- /* ignore ata_sff_softreset if ctl isn't accessible */
- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
- softreset = NULL;
-
/* ignore built-in hardresets if SCR access is not available */
if ((hardreset == sata_std_hardreset ||
hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
@@ -2316,9 +2328,9 @@ int ata_pci_sff_init_host(struct ata_host *host)
rc = pcim_iomap_regions(pdev, 0x3 << base,
dev_driver_string(gdev));
if (rc) {
- dev_printk(KERN_WARNING, gdev,
- "failed to request/iomap BARs for port %d "
- "(errno=%d)\n", i, rc);
+ dev_warn(gdev,
+ "failed to request/iomap BARs for port %d (errno=%d)\n",
+ i, rc);
if (rc == -EBUSY)
pcim_pin_device(pdev);
ap->ops = &ata_dummy_port_ops;
@@ -2340,7 +2352,7 @@ int ata_pci_sff_init_host(struct ata_host *host)
}
if (!mask) {
- dev_printk(KERN_ERR, gdev, "no available native port\n");
+ dev_err(gdev, "no available native port\n");
return -ENODEV;
}
@@ -2375,8 +2387,7 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev,
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to allocate ATA host\n");
+ dev_err(&pdev->dev, "failed to allocate ATA host\n");
rc = -ENOMEM;
goto err_out;
}
@@ -2507,31 +2518,10 @@ static const struct ata_port_info *ata_sff_find_valid_pi(
return NULL;
}
-/**
- * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
- * @pdev: Controller to be initialized
- * @ppi: array of port_info, must be enough for two ports
- * @sht: scsi_host_template to use when registering the host
- * @host_priv: host private_data
- * @hflag: host flags
- *
- * This is a helper function which can be called from a driver's
- * xxx_init_one() probe function if the hardware uses traditional
- * IDE taskfile registers and is PIO only.
- *
- * ASSUMPTION:
- * Nobody makes a single channel controller that appears solely as
- * the secondary legacy port on PCI.
- *
- * LOCKING:
- * Inherited from PCI layer (may sleep).
- *
- * RETURNS:
- * Zero on success, negative on errno-based value on error.
- */
-int ata_pci_sff_init_one(struct pci_dev *pdev,
- const struct ata_port_info * const *ppi,
- struct scsi_host_template *sht, void *host_priv, int hflag)
+static int ata_pci_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const *ppi,
+ struct scsi_host_template *sht, void *host_priv,
+ int hflags, bool bmdma)
{
struct device *dev = &pdev->dev;
const struct ata_port_info *pi;
@@ -2542,8 +2532,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no valid port_info specified\n");
+ dev_err(&pdev->dev, "no valid port_info specified\n");
return -EINVAL;
}
@@ -2554,14 +2543,26 @@ int ata_pci_sff_init_one(struct pci_dev *pdev,
if (rc)
goto out;
- /* prepare and activate SFF host */
- rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
+#ifdef CONFIG_ATA_BMDMA
+ if (bmdma)
+ /* prepare and activate BMDMA host */
+ rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
+ else
+#endif
+ /* prepare and activate SFF host */
+ rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
if (rc)
goto out;
host->private_data = host_priv;
- host->flags |= hflag;
+ host->flags |= hflags;
- rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
+#ifdef CONFIG_ATA_BMDMA
+ if (bmdma) {
+ pci_set_master(pdev);
+ rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
+ } else
+#endif
+ rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
out:
if (rc == 0)
devres_remove_group(&pdev->dev, NULL);
@@ -2570,6 +2571,35 @@ out:
return rc;
}
+
+/**
+ * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
+ * @pdev: Controller to be initialized
+ * @ppi: array of port_info, must be enough for two ports
+ * @sht: scsi_host_template to use when registering the host
+ * @host_priv: host private_data
+ * @hflag: host flags
+ *
+ * This is a helper function which can be called from a driver's
+ * xxx_init_one() probe function if the hardware uses traditional
+ * IDE taskfile registers and is PIO only.
+ *
+ * ASSUMPTION:
+ * Nobody makes a single channel controller that appears solely as
+ * the secondary legacy port on PCI.
+ *
+ * LOCKING:
+ * Inherited from PCI layer (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, negative errno-based value on error.
+ */
+int ata_pci_sff_init_one(struct pci_dev *pdev,
+ const struct ata_port_info * const *ppi,
+ struct scsi_host_template *sht, void *host_priv, int hflag)
+{
+ return ata_pci_init_one(pdev, ppi, sht, host_priv, hflag, 0);
+}
EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
#endif /* CONFIG_PCI */
@@ -3164,8 +3194,7 @@ static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
{
int i;
- dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
- reason);
+ dev_err(host->dev, "BMDMA: %s, falling back to PIO\n", reason);
for (i = 0; i < 2; i++) {
host->ports[i]->mwdma_mask = 0;
@@ -3288,43 +3317,7 @@ int ata_pci_bmdma_init_one(struct pci_dev *pdev,
struct scsi_host_template *sht, void *host_priv,
int hflags)
{
- struct device *dev = &pdev->dev;
- const struct ata_port_info *pi;
- struct ata_host *host = NULL;
- int rc;
-
- DPRINTK("ENTER\n");
-
- pi = ata_sff_find_valid_pi(ppi);
- if (!pi) {
- dev_printk(KERN_ERR, &pdev->dev,
- "no valid port_info specified\n");
- return -EINVAL;
- }
-
- if (!devres_open_group(dev, NULL, GFP_KERNEL))
- return -ENOMEM;
-
- rc = pcim_enable_device(pdev);
- if (rc)
- goto out;
-
- /* prepare and activate BMDMA host */
- rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
- if (rc)
- goto out;
- host->private_data = host_priv;
- host->flags |= hflags;
-
- pci_set_master(pdev);
- rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
- out:
- if (rc == 0)
- devres_remove_group(&pdev->dev, NULL);
- else
- devres_release_group(&pdev->dev, NULL);
-
- return rc;
+ return ata_pci_init_one(pdev, ppi, sht, host_priv, hflags, 1);
}
EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
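/*
 * Editor's sketch (not from the patch): the libata-sff change above folds
 * two nearly identical init paths into one static helper taking a flag,
 * with the BMDMA branch compiled out when unsupported. A minimal
 * illustration of that shape, with invented names:
 */
#include <stdbool.h>
#include <stdio.h>

#define HAVE_BMDMA 1

static int init_one(bool bmdma)
{
#if HAVE_BMDMA
	if (bmdma) {
		printf("prepare + activate BMDMA host\n");
		return 0;
	}
#endif
	printf("prepare + activate SFF host\n");
	return 0;
}

static int sff_init_one(void)   { return init_one(false); }
static int bmdma_init_one(void) { return init_one(true); }

int main(void)
{
	return sff_init_one() | bmdma_init_one();
}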
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index ce9dc62..c01f040 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -312,25 +312,25 @@ int ata_tport_add(struct device *parent,
/*
* ATA link attributes
*/
+static int noop(int x) { return x; }
-
-#define ata_link_show_linkspeed(field) \
+#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ata_link *link = transport_class_to_link(dev); \
\
- return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \
+ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \
}
-#define ata_link_linkspeed_attr(field) \
- ata_link_show_linkspeed(field) \
+#define ata_link_linkspeed_attr(field, format) \
+ ata_link_show_linkspeed(field, format) \
static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL)
-ata_link_linkspeed_attr(hw_sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd_limit);
-ata_link_linkspeed_attr(sata_spd);
+ata_link_linkspeed_attr(hw_sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd_limit, fls);
+ata_link_linkspeed_attr(sata_spd, noop);
static DECLARE_TRANSPORT_CLASS(ata_link_class,
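/*
 * Editor's sketch: hw_sata_spd_limit and sata_spd_limit are bit masks of
 * allowed speeds, so the attribute needs fls() to pick the highest set bit,
 * while sata_spd already holds the decoded 1/2/3 generation value -- hence
 * the identity formatter above. Tiny demo (local fls; the kernel's comes
 * from <linux/bitops.h>):
 */
#include <stdio.h>

static int my_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int spd_limit = 0x3; /* mask: gen1 | gen2 */
	unsigned int sata_spd  = 2;   /* already decoded: "3.0 Gbps" */

	printf("limit -> gen%d, current -> gen%u\n",
	       my_fls(spd_limit), sata_spd);
	return 0;
}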
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 91949d9..54145ed 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -195,8 +195,6 @@ static int pacpi_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct pata_acpi *acpi;
- int ret;
-
if (ap->acpi_handle == NULL)
return -ENODEV;
@@ -205,11 +203,7 @@ static int pacpi_port_start(struct ata_port *ap)
return -ENOMEM;
acpi->mask[0] = pacpi_discover_modes(ap, &ap->link.device[0]);
acpi->mask[1] = pacpi_discover_modes(ap, &ap->link.device[1]);
- ret = ata_bmdma_port_start(ap);
- if (ret < 0)
- return ret;
-
- return ret;
+ return ata_bmdma_port_start(ap);
}
static struct scsi_host_template pacpi_sht = {
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 794ec6e..61da069 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -56,7 +56,7 @@ static const struct dmi_system_id cable_dmi_table[] = {
},
},
{
- .ident = "Toshiba Satelite S1800-814",
+ .ident = "Toshiba Satellite S1800-814",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"),
@@ -287,10 +287,10 @@ static void ali_warn_atapi_dma(struct ata_device *adev)
int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
if (print_info && adev->class == ATA_DEV_ATAPI && !ali_atapi_dma) {
- ata_dev_printk(adev, KERN_WARNING,
- "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
- ata_dev_printk(adev, KERN_WARNING,
- "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
+ ata_dev_warn(adev,
+ "WARNING: ATAPI DMA disabled for reliability issues. It can be enabled\n");
+ ata_dev_warn(adev,
+ "WARNING: via pata_ali.atapi_dma modparam or corresponding sysfs node.\n");
}
}
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index b0975a5..dc6b5da 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -60,7 +60,7 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
UT = T / 2;
if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
- dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
+ dev_err(&pdev->dev, "unknown mode %d\n", speed);
return;
}
@@ -311,7 +311,7 @@ static unsigned long nv_mode_filter(struct ata_device *dev,
cable detection result */
limit |= ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA2);
- ata_port_printk(ap, KERN_DEBUG, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
+ ata_port_dbg(ap, "nv_mode_filter: 0x%lx&0x%lx->0x%lx, "
"BIOS=0x%lx (0x%x) ACPI=0x%lx%s\n",
xfer_mask, limit, xfer_mask & limit, bios_limit,
saved_udma, acpi_limit, acpi_str);
@@ -530,14 +530,12 @@ static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
};
const struct ata_port_info *ppi[] = { NULL, NULL };
- static int printed_version;
int type = id->driver_data;
void *hpriv = NULL;
u8 fifo;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 719bb73..e8574bb 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -922,8 +922,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int arasan_cf_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = dev_get_drvdata(dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
if (acdev->dma_chan) {
@@ -937,8 +936,7 @@ static int arasan_cf_suspend(struct device *dev)
static int arasan_cf_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = dev_get_drvdata(dev);
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
cf_init(acdev);
diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
index 2215632..4b8b22e 100644
--- a/drivers/ata/pata_artop.c
+++ b/drivers/ata/pata_artop.c
@@ -2,7 +2,7 @@
* pata_artop.c - ARTOP ATA controller driver
*
* (C) 2006 Red Hat
- * (C) 2007 Bartlomiej Zolnierkiewicz
+ * (C) 2007,2011 Bartlomiej Zolnierkiewicz
*
* Based in part on drivers/ide/pci/aec62xx.c
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
@@ -28,7 +28,7 @@
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
-#define DRV_VERSION "0.4.5"
+#define DRV_VERSION "0.4.6"
/*
* The ARTOP has 33 Mhz and "over clocked" timing tables. Until we
@@ -39,31 +39,15 @@
static int clock = 0;
-static int artop6210_pre_reset(struct ata_link *link, unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- const struct pci_bits artop_enable_bits[] = {
- { 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
- { 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
- };
-
- if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
- return -ENOENT;
-
- return ata_sff_prereset(link, deadline);
-}
-
/**
- * artop6260_pre_reset - check for 40/80 pin
+ * artop62x0_pre_reset - probe begin
* @link: link
* @deadline: deadline jiffies for the operation
*
- * The ARTOP hardware reports the cable detect bits in register 0x49.
* Nothing complicated needed here.
*/
-static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
+static int artop62x0_pre_reset(struct ata_link *link, unsigned long deadline)
{
static const struct pci_bits artop_enable_bits[] = {
{ 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
@@ -73,7 +57,7 @@ static int artop6260_pre_reset(struct ata_link *link, unsigned long deadline)
struct ata_port *ap = link->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- /* Odd numbered device ids are the units with enable bits (the -R cards) */
+ /* Odd numbered device ids are the units with enable bits. */
if ((pdev->device & 1) &&
!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no]))
return -ENOENT;
@@ -317,7 +301,7 @@ static struct ata_port_operations artop6210_ops = {
.cable_detect = ata_cable_40wire,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
- .prereset = artop6210_pre_reset,
+ .prereset = artop62x0_pre_reset,
.qc_defer = artop6210_qc_defer,
};
@@ -326,9 +310,36 @@ static struct ata_port_operations artop6260_ops = {
.cable_detect = artop6260_cable_detect,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
- .prereset = artop6260_pre_reset,
+ .prereset = artop62x0_pre_reset,
};
+static void atp8xx_fixup(struct pci_dev *pdev)
+{
+ if (pdev->device == 0x0005)
+ /* BIOS may have left us in UDMA, clear it before libata probe */
+ pci_write_config_byte(pdev, 0x54, 0);
+ else if (pdev->device == 0x0008 || pdev->device == 0x0009) {
+ u8 reg;
+
+ /* Mac systems come up with some registers not set up the way
+ we will need them */
+
+ /* Clear reset & test bits */
+ pci_read_config_byte(pdev, 0x49, &reg);
+ pci_write_config_byte(pdev, 0x49, reg & ~0x30);
+
+ /* PCI latency must be > 0x80 for burst mode, tweak it
+ * if required.
+ */
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
+ if (reg <= 0x80)
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
+
+ /* Enable IRQ output and burst mode */
+ pci_read_config_byte(pdev, 0x4a, &reg);
+ pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
+ }
+}
+
/**
* artop_init_one - Register ARTOP ATA PCI device with kernel services
@@ -346,7 +357,6 @@ static struct ata_port_operations artop6260_ops = {
static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
- static int printed_version;
static const struct ata_port_info info_6210 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -378,50 +388,28 @@ static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { NULL, NULL };
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- if (id->driver_data == 0) { /* 6210 variant */
+ if (id->driver_data == 0) /* 6210 variant */
ppi[0] = &info_6210;
- /* BIOS may have left us in UDMA, clear it before libata probe */
- pci_write_config_byte(pdev, 0x54, 0);
- }
else if (id->driver_data == 1) /* 6260 */
ppi[0] = &info_626x;
else if (id->driver_data == 2) { /* 6280 or 6280 + fast */
unsigned long io = pci_resource_start(pdev, 4);
- u8 reg;
ppi[0] = &info_628x;
if (inb(io) & 0x10)
ppi[0] = &info_628x_fast;
- /* Mac systems come up with some registers not set as we
- will need them */
-
- /* Clear reset & test bits */
- pci_read_config_byte(pdev, 0x49, &reg);
- pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
-
- /* PCI latency must be > 0x80 for burst mode, tweak it
- * if required.
- */
- pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
- if (reg <= 0x80)
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
-
- /* Enable IRQ output and burst mode */
- pci_read_config_byte(pdev, 0x4a, &reg);
- pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
-
}
BUG_ON(ppi[0] == NULL);
+ atp8xx_fixup(pdev);
+
return ata_pci_bmdma_init_one(pdev, ppi, &artop_sht, NULL, 0);
}
@@ -435,11 +423,32 @@ static const struct pci_device_id artop_pci_tbl[] = {
{ } /* terminate list */
};
+#ifdef CONFIG_PM
+static int atp8xx_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ atp8xx_fixup(pdev);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static struct pci_driver artop_pci_driver = {
.name = DRV_NAME,
.id_table = artop_pci_tbl,
.probe = artop_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = atp8xx_reinit_one,
+#endif
};
static int __init artop_init(void)
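/*
 * Editor's sketch (not from the patch): the resume hook above re-applies
 * the chipset fixup because config-space programming is lost across
 * suspend. The general shape, with invented names:
 */
#include <stdio.h>

static void chip_fixup(void)    { printf("reprogram config regs\n"); }
static int  pci_do_resume(void) { return 0; }
static void host_resume(void)   { printf("host resumed\n"); }

static int reinit_one(void)
{
	int rc = pci_do_resume();

	if (rc)
		return rc;
	chip_fixup();	/* same helper the probe path uses */
	host_resume();
	return 0;
}

int main(void)
{
	return reinit_one();
}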
@@ -455,9 +464,8 @@ static void __exit artop_exit(void)
module_init(artop_init);
module_exit(artop_exit);
-MODULE_AUTHOR("Alan Cox");
+MODULE_AUTHOR("Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 960c725..a76f24a 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -30,7 +30,7 @@
#include <mach/at91sam9_smc.h>
#include <mach/board.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#define DRV_NAME "pata_at91"
#define DRV_VERSION "0.3"
@@ -414,10 +414,13 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
host->private_data = info;
- return ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
+ ret = ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
irq ? ata_sff_interrupt : NULL,
irq_flags, &pata_at91_sht);
+ if (!ret)
+ return 0;
+
err_put:
clk_put(info->mck);
return ret;
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 4375561..be1aa14 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -49,6 +49,31 @@ static int atiixp_cable_detect(struct ata_port *ap)
static DEFINE_SPINLOCK(atiixp_lock);
/**
+ * atiixp_prereset - perform reset handling
+ * @link: ATA link
+ * @deadline: deadline jiffies for the operation
+ *
+ * Reset sequence checking enable bits to see which ports are
+ * active.
+ */
+
+static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
+{
+ static const struct pci_bits atiixp_enable_bits[] = {
+ { 0x48, 1, 0x01, 0x00 },
+ { 0x48, 1, 0x08, 0x00 }
+ };
+
+ struct ata_port *ap = link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+
+ if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
+ return -ENOENT;
+
+ return ata_sff_prereset(link, deadline);
+}
+
+/**
* atiixp_set_pio_timing - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
@@ -221,6 +246,7 @@ static struct ata_port_operations atiixp_port_ops = {
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
+ .prereset = atiixp_prereset,
.cable_detect = atiixp_cable_detect,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
@@ -235,16 +261,7 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
.udma_mask = ATA_UDMA5,
.port_ops = &atiixp_port_ops
};
- static const struct pci_bits atiixp_enable_bits[] = {
- { 0x48, 1, 0x01, 0x00 },
- { 0x48, 1, 0x08, 0x00 }
- };
const struct ata_port_info *ppi[] = { &info, &info };
- int i;
-
- for (i = 0; i < 2; i++)
- if (!pci_test_config_bits(pdev, &atiixp_enable_bits[i]))
- ppi[i] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 9529593..3cfabb2 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -470,7 +470,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
}
if (!mask) {
- dev_printk(KERN_ERR, gdev, "no available native port\n");
+ dev_err(gdev, "no available native port\n");
return -ENODEV;
}
@@ -487,7 +487,6 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
static int atp867x_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- static int printed_version;
static const struct ata_port_info info_867x = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -499,8 +498,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
const struct ata_port_info *ppi[] = { &info_867x, NULL };
int rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -511,15 +509,14 @@ static int atp867x_init_one(struct pci_dev *pdev,
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to allocate ATA host\n");
+ dev_err(&pdev->dev, "failed to allocate ATA host\n");
rc = -ENOMEM;
goto err_out;
}
rc = atp867x_ata_pci_sff_init_host(host);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to init host\n");
+ dev_err(&pdev->dev, "failed to init host\n");
goto err_out;
}
@@ -528,7 +525,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
rc = ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
IRQF_SHARED, &atp867x_sht);
if (rc)
- dev_printk(KERN_ERR, &pdev->dev, "failed to activate host\n");
+ dev_err(&pdev->dev, "failed to activate host\n");
err_out:
return rc;
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index ea64967..bd987bb 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -1129,7 +1129,7 @@ static int bfin_softreset(struct ata_link *link, unsigned int *classes,
/* issue bus reset */
err_mask = bfin_bus_softreset(ap, devmask);
if (err_mask) {
- ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
+ ata_port_err(ap, "SRST failed (err_mask=0x%x)\n",
err_mask);
return -EIO;
}
@@ -1382,7 +1382,7 @@ idle_irq:
#ifdef ATA_IRQ_TRAP
if ((ap->stats.idle_irq % 1000) == 0) {
ap->ops->irq_ack(ap, 0); /* debug trap */
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+ ata_port_warn(ap, "irq trap\n");
return 1;
}
#endif
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 7bafc16..e1fb39a 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -82,7 +82,7 @@ static int cmd648_cable_detect(struct ata_port *ap)
}
/**
- * cmd64x_set_piomode - set PIO and MWDMA timing
+ * cmd64x_set_timing - set PIO and MWDMA timing
* @ap: ATA interface
* @adev: ATA device
* @mode: mode
@@ -288,6 +288,22 @@ static struct ata_port_operations cmd648_port_ops = {
.cable_detect = cmd648_cable_detect,
};
+static void cmd64x_fixup(struct pci_dev *pdev)
+{
+ u8 mrdmode;
+
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
+ pci_read_config_byte(pdev, MRDMODE, &mrdmode);
+ mrdmode &= ~0x30; /* IRQ set up */
+ mrdmode |= 0x02; /* Memory read line enable */
+ pci_write_config_byte(pdev, MRDMODE, mrdmode);
+
+ /* PPC specific fixup copied from old driver */
+#ifdef CONFIG_PPC
+ pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
+#endif
+}
+
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct ata_port_info cmd_info[6] = {
@@ -336,7 +352,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
&cmd_info[id->driver_data],
NULL
};
- u8 mrdmode, reg;
+ u8 reg;
int rc;
struct pci_dev *bridge = pdev->bus->self;
/* mobility split bridges don't report enabled ports correctly */
@@ -368,11 +384,7 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
cntrl_ch0_ok = 0;
}
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
- pci_read_config_byte(pdev, MRDMODE, &mrdmode);
- mrdmode &= ~ 0x30; /* IRQ set up */
- mrdmode |= 0x02; /* Memory read line enable */
- pci_write_config_byte(pdev, MRDMODE, mrdmode);
+ cmd64x_fixup(pdev);
/* check for enabled ports */
pci_read_config_byte(pdev, CNTRL, &reg);
@@ -388,13 +400,6 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
ppi[1] = &ata_dummy_port_info;
}
- /* Force PIO 0 here.. */
-
- /* PPC specific fixup copied from old driver */
-#ifdef CONFIG_PPC
- pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
-#endif
-
return ata_pci_bmdma_init_one(pdev, ppi, &cmd64x_sht, NULL, 0);
}
@@ -402,21 +407,14 @@ static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
static int cmd64x_reinit_one(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
- u8 mrdmode;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
- pci_read_config_byte(pdev, MRDMODE, &mrdmode);
- mrdmode &= ~ 0x30; /* IRQ set up */
- mrdmode |= 0x02; /* Memory read line enable */
- pci_write_config_byte(pdev, MRDMODE, mrdmode);
-#ifdef CONFIG_PPC
- pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
-#endif
+ cmd64x_fixup(pdev);
+
ata_host_resume(host);
return 0;
}
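/*
 * Editor's sketch: cmd64x_fixup() is a read-modify-write of the MRDMODE
 * register -- clear the IRQ routing bits (0x30), set memory-read-line
 * (0x02). Modeled here on a plain byte; the pretend starting value and the
 * omitted config-space accessors are stand-ins.
 */
#include <stdio.h>

int main(void)
{
	unsigned char mrdmode = 0x35; /* pretend value read from config space */

	mrdmode &= ~0x30; /* IRQ set up */
	mrdmode |= 0x02;  /* memory read line enable */
	printf("MRDMODE: 0x%02x\n", mrdmode); /* prints 0x07 */
	return 0;
}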
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index e3254fc..9ddcddc 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -149,8 +149,7 @@ static int __devinit cs5520_init_one(struct pci_dev *pdev, const struct pci_devi
ppi[1] = &pi;
if ((pcicfg & 0x40) == 0) {
- dev_printk(KERN_WARNING, &pdev->dev,
- "DMA mode disabled. Enabling.\n");
+ dev_warn(&pdev->dev, "DMA mode disabled. Enabling.\n");
pci_write_config_byte(pdev, 0x60, pcicfg | 0x40);
}
diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
index 03a9318..a0b4640 100644
--- a/drivers/ata/pata_cs5535.c
+++ b/drivers/ata/pata_cs5535.c
@@ -38,7 +38,7 @@
#include <linux/libata.h>
#include <asm/msr.h>
-#define DRV_NAME "cs5535"
+#define DRV_NAME "pata_cs5535"
#define DRV_VERSION "0.2.12"
/*
@@ -67,8 +67,6 @@
#define CS5535_CABLE_DETECT 0x48
-#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL)==0x00009172 )
-
/**
* cs5535_cable_detect - detect cable type
* @ap: Port to detect on
@@ -188,16 +186,6 @@ static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
};
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
- u32 timings, dummy;
-
- /* Check the BIOS set the initial timing clock. If not set the
- timings for PIO0 */
- rdmsr(ATAC_CH0D0_PIO, timings, dummy);
- if (CS5535_BAD_PIO(timings))
- wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
- rdmsr(ATAC_CH0D1_PIO, timings, dummy);
- if (CS5535_BAD_PIO(timings))
- wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
return ata_pci_bmdma_init_one(dev, ppi, &cs5535_sht, NULL, 0);
}
@@ -230,7 +218,7 @@ static void __exit cs5535_exit(void)
}
MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgan Zuleger, Alexander Kiausch");
-MODULE_DESCRIPTION("low-level driver for the NS/AMD 5530");
+MODULE_DESCRIPTION("low-level driver for the NS/AMD 5535");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5535);
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
index a088347..f0243ed 100644
--- a/drivers/ata/pata_efar.c
+++ b/drivers/ata/pata_efar.c
@@ -73,7 +73,7 @@ static DEFINE_SPINLOCK(efar_lock);
/**
* efar_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
- * @adev: um
+ * @adev: Device to program
*
* Set PIO mode for device, in host controller PCI config space.
*
@@ -85,9 +85,9 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
- unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
+ unsigned int master_port = ap->port_no ? 0x42 : 0x40;
unsigned long flags;
- u16 idetm_data;
+ u16 master_data;
u8 udma_enable;
int control = 0;
@@ -113,20 +113,20 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
spin_lock_irqsave(&efar_lock, flags);
- pci_read_config_word(dev, idetm_port, &idetm_data);
+ pci_read_config_word(dev, master_port, &master_data);
/* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
- idetm_data &= 0xCCF0;
- idetm_data |= control;
- idetm_data |= (timings[pio][0] << 12) |
+ master_data &= 0xCCF0;
+ master_data |= control;
+ master_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
int shift = 4 * ap->port_no;
u8 slave_data;
- idetm_data &= 0xFF0F;
- idetm_data |= (control << 4);
+ master_data &= 0xFF0F;
+ master_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
@@ -135,8 +135,8 @@ static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x44, slave_data);
}
- idetm_data |= 0x4000; /* Ensure SITRE is set */
- pci_write_config_word(dev, idetm_port, idetm_data);
+ master_data |= 0x4000; /* Ensure SITRE is set */
+ pci_write_config_word(dev, master_port, master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
@@ -263,7 +263,6 @@ static struct ata_port_operations efar_ops = {
static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -273,9 +272,7 @@ static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
};
const struct ata_port_info *ppi[] = { &info, &info };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &efar_sht, NULL,
ATA_HOST_PARALLEL_SCAN);
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 6c77d68..42cffd3 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -111,6 +111,28 @@ static const struct hpt_clock hpt366_25[] = {
{ 0, 0x01208585 }
};
+/**
+ * hpt36x_find_mode - find the hpt36x timing
+ * @ap: ATA port
+ * @speed: transfer mode
+ *
+ * Return the 32bit register programming information for this channel
+ * that matches the speed provided.
+ */
+
+static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
+{
+ struct hpt_clock *clocks = ap->host->private_data;
+
+ while (clocks->xfer_mode) {
+ if (clocks->xfer_mode == speed)
+ return clocks->timing;
+ clocks++;
+ }
+ BUG();
+ return 0xffffffffU; /* silence compiler warning */
+}
+
static const char * const bad_ata33[] = {
"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3",
"Maxtor 90845U3", "Maxtor 90650U2",
@@ -210,10 +232,9 @@ static int hpt36x_cable_detect(struct ata_port *ap)
static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
u8 mode)
{
- struct hpt_clock *clocks = ap->host->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr = 0x40 + 4 * adev->devno;
- u32 mask, reg;
+ u32 mask, reg, t;
/* determine timing mask and find matching clock entry */
if (mode < XFER_MW_DMA_0)
@@ -223,13 +244,7 @@ static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
else
mask = 0x30070000;
- while (clocks->xfer_mode) {
- if (clocks->xfer_mode == mode)
- break;
- clocks++;
- }
- if (!clocks->xfer_mode)
- BUG();
+ t = hpt36x_find_mode(ap, mode);
/*
* Combine new mode bits with old config bits and disable
@@ -237,7 +252,7 @@ static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
* problems handling I/O errors later.
*/
pci_read_config_dword(pdev, addr, &reg);
- reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000;
+ reg = ((reg & ~mask) | (t & mask)) & ~0xc0000000;
pci_write_config_dword(pdev, addr, reg);
}
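/*
 * Editor's sketch (not from the patch): hpt36x_find_mode() walks a
 * sentinel-terminated (xfer_mode, timing) table and BUGs on a miss, since
 * callers only pass modes the table advertises. A stand-alone model with
 * invented table values; assert() stands in for BUG():
 */
#include <assert.h>
#include <stdio.h>

struct clock_entry { int xfer_mode; unsigned int timing; };

static const struct clock_entry table[] = {
	{ 0x08, 0xc1d08585u },	/* illustrative PIO entry */
	{ 0x20, 0x20208585u },	/* illustrative MWDMA entry */
	{ 0, 0 }		/* sentinel */
};

static unsigned int find_mode(const struct clock_entry *c, int speed)
{
	for (; c->xfer_mode; c++)
		if (c->xfer_mode == speed)
			return c->timing;
	assert(0 && "mode not in table"); /* kernel uses BUG() */
	return 0xffffffffu;
}

int main(void)
{
	printf("timing 0x%08x\n", find_mode(table, 0x20));
	return 0;
}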
diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
index 24d7df8..b3042da 100644
--- a/drivers/ata/pata_hpt3x3.c
+++ b/drivers/ata/pata_hpt3x3.c
@@ -185,7 +185,6 @@ static void hpt3x3_init_chipset(struct pci_dev *dev)
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -206,8 +205,7 @@ static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
hpt3x3_init_chipset(pdev);
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
if (!host)
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 9f2889f..52e7e7b 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -210,8 +210,8 @@ static void pata_icside_set_dmamode(struct ata_port *ap, struct ata_device *adev
else
iomd_type = 'A', cycle = 562;
- ata_dev_printk(adev, KERN_INFO, "timings: act %dns rec %dns cyc %dns (%c)\n",
- t.active, t.recover, t.cycle, iomd_type);
+ ata_dev_info(adev, "timings: act %dns rec %dns cyc %dns (%c)\n",
+ t.active, t.recover, t.cycle, iomd_type);
state->port[ap->port_no].speed[adev->devno] = cycle;
}
diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
index 4d142a2..cf9164d 100644
--- a/drivers/ata/pata_it8213.c
+++ b/drivers/ata/pata_it8213.c
@@ -76,8 +76,8 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
- unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
- u16 idetm_data;
+ unsigned int master_port = ap->port_no ? 0x42 : 0x40;
+ u16 master_data;
int control = 0;
/*
@@ -100,19 +100,19 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
if (adev->class != ATA_DEV_ATA)
control |= 4; /* PPE */
- pci_read_config_word(dev, idetm_port, &idetm_data);
+ pci_read_config_word(dev, master_port, &master_data);
/* Set PPE, IE, and TIME as appropriate */
if (adev->devno == 0) {
- idetm_data &= 0xCCF0;
- idetm_data |= control;
- idetm_data |= (timings[pio][0] << 12) |
+ master_data &= 0xCCF0;
+ master_data |= control;
+ master_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
u8 slave_data;
- idetm_data &= 0xFF0F;
- idetm_data |= (control << 4);
+ master_data &= 0xFF0F;
+ master_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
@@ -121,8 +121,8 @@ static void it8213_set_piomode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_byte(dev, 0x44, slave_data);
}
- idetm_data |= 0x4000; /* Ensure SITRE is set */
- pci_write_config_word(dev, idetm_port, idetm_data);
+ master_data |= 0x4000; /* Ensure SITRE is set */
+ pci_write_config_word(dev, master_port, master_data);
}
/**
@@ -163,7 +163,7 @@ static void it8213_set_dmamode (struct ata_port *ap, struct ata_device *adev)
/* Clocks follow the PIIX style */
u_speed = min(2 - (udma & 1), udma);
- if (udma == 5)
+ if (udma > 4)
u_clock = 0x1000; /* 100MHz */
else if (udma > 2)
u_clock = 1; /* 66MHz */
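The widened test pairs with the udma_mask change below: once ATA_UDMA6 is advertised, UDMA5 and UDMA6 must both select the 100MHz clock, which "udma > 4" covers and "udma == 5" did not. A quick table of what the PIIX-style formula produces, assuming min() behaves as the kernel macro does:

	/* compile with: cc demo.c */
	#include <stdio.h>

	#define MIN(a, b) ((a) < (b) ? (a) : (b))

	int main(void)
	{
		/* mirrors the selection logic in it8213_set_dmamode() */
		for (int udma = 0; udma <= 6; udma++) {
			int u_speed = MIN(2 - (udma & 1), udma);
			int u_clock = (udma > 4) ? 0x1000 : (udma > 2) ? 1 : 0;
			printf("UDMA%d -> u_speed=%d u_clock=0x%04x\n",
			       udma, u_speed, u_clock);
		}
		return 0;
	}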
@@ -258,20 +258,17 @@ static struct ata_port_operations it8213_ops = {
static int it8213_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA12_ONLY,
- .udma_mask = ATA_UDMA4, /* FIXME: want UDMA 100? */
+ .udma_mask = ATA_UDMA6,
.port_ops = &it8213_ops,
};
/* Current IT8213 stuff is single port */
const struct ata_port_info *ppi[] = { &info, &ata_dummy_port_info };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &it8213_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 2d15f25..62c5d00 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -473,12 +473,12 @@ static int it821x_smart_set_mode(struct ata_link *link, struct ata_device **unus
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (ata_id_has_dma(dev->id)) {
- ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
+ ata_dev_info(dev, "configured for DMA\n");
dev->xfer_mode = XFER_MW_DMA_0;
dev->xfer_shift = ATA_SHIFT_MWDMA;
dev->flags &= ~ATA_DFLAG_PIO;
} else {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
@@ -508,12 +508,12 @@ static void it821x_dev_config(struct ata_device *adev)
if (strstr(model_num, "Integrated Technology Express")) {
/* RAID mode */
- ata_dev_printk(adev, KERN_INFO, "%sRAID%d volume",
- adev->id[147]?"Bootable ":"",
- adev->id[129]);
+ ata_dev_info(adev, "%sRAID%d volume",
+ adev->id[147] ? "Bootable " : "",
+ adev->id[129]);
if (adev->id[129] != 1)
- printk("(%dK stripe)", adev->id[146]);
- printk(".\n");
+ pr_cont("(%dK stripe)", adev->id[146]);
+ pr_cont("\n");
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
@@ -610,7 +610,7 @@ static void it821x_display_disk(int n, u8 *buf)
char *cbl = "(40 wire cable)";
static const char *types[5] = {
- "RAID0", "RAID1" "RAID 0+1", "JBOD", "DISK"
+ "RAID0", "RAID1", "RAID 0+1", "JBOD", "DISK"
};
if (buf[52] > 4) /* No Disk */
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index f6b3f99..15b64311 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -31,7 +31,7 @@ static int ixp4xx_set_mode(struct ata_link *link, struct ata_device **error)
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO0\n");
+ ata_dev_info(dev, "configured for PIO0\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
@@ -181,7 +181,7 @@ static __devinit int ixp4xx_pata_probe(struct platform_device *pdev)
ixp4xx_setup_port(ap, data, cs0->start, cs1->start);
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* activate host */
return ata_host_activate(host, irq, ata_sff_interrupt, 0, &ixp4xx_sht);
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index d750962..4fe9d21 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -79,15 +79,6 @@ static int all;
module_param(all, int, 0444);
MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI(0=off, 1=on)");
-struct legacy_data {
- unsigned long timing;
- u8 clock[2];
- u8 last;
- int fast;
- struct platform_device *platform_dev;
-
-};
-
enum controller {
BIOS = 0,
SNOOP = 1,
@@ -104,6 +95,14 @@ enum controller {
UNKNOWN = -1
};
+struct legacy_data {
+ unsigned long timing;
+ u8 clock[2];
+ u8 last;
+ int fast;
+ enum controller type;
+ struct platform_device *platform_dev;
+};
struct legacy_probe {
unsigned char *name;
@@ -137,11 +136,17 @@ static int ht6560a; /* HT 6560A on primary 1, second 2, both 3 */
static int ht6560b; /* HT 6560B on primary 1, second 2, both 3 */
static int opti82c611a; /* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present(pri/sec autodetect) */
-static int qdi; /* Set to probe QDI controllers */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */
+/* Set to probe QDI controllers */
+#ifdef CONFIG_PATA_QDI_MODULE
+static int qdi = 1;
+#else
+static int qdi;
+#endif
+
#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
static int winbond = 1; /* Set to probe Winbond controllers,
give I/O port if non standard */
@@ -213,7 +218,7 @@ static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
struct ata_device *dev;
ata_for_each_dev(dev, link, ENABLED) {
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
@@ -630,40 +635,20 @@ static struct ata_port_operations opti82c46x_port_ops = {
.qc_issue = opti82c46x_qc_issue,
};
-static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct legacy_data *ld_qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- if (ld_qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
-
- ld_qdi->clock[adev->devno] = timing;
-
- outb(timing, ld_qdi->timing);
-}
-
/**
- * qdi6580dp_set_piomode - PIO setup for dual channel
+ * qdi65x0_set_piomode - PIO setup for QDI65x0
* @ap: Port
* @adev: Device
*
+ * In single channel mode the 6580 has one clock per device and we can
+ * avoid the requirement to clock switch. We also have to load the timing
+ * into the right clock according to whether we are master or slave.
+ *
* In dual channel mode the 6580 has one clock per channel and we have
* to software clockswitch in qc_issue.
*/
-static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
struct legacy_data *ld_qdi = ap->host->private_data;
@@ -681,47 +666,15 @@ static void qdi6580dp_set_piomode(struct ata_port *ap, struct ata_device *adev)
recovery = 15 - clamp_val(t.recover, 0, 15);
}
timing = (recovery << 4) | active | 0x08;
-
ld_qdi->clock[adev->devno] = timing;
- outb(timing, ld_qdi->timing + 2 * ap->port_no);
- /* Clear the FIFO */
- if (adev->class != ATA_DEV_ATA)
- outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
-}
-
-/**
- * qdi6580_set_piomode - PIO setup for single channel
- * @ap: Port
- * @adev: Device
- *
- * In single channel mode the 6580 has one clock per device and we can
- * avoid the requirement to clock switch. We also have to load the timing
- * into the right clock according to whether we are master or slave.
- */
-
-static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct legacy_data *ld_qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
+ if (ld_qdi->type == QDI6580)
+ outb(timing, ld_qdi->timing + 2 * adev->devno);
+ else
+ outb(timing, ld_qdi->timing + 2 * ap->port_no);
- if (ld_qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
- ld_qdi->clock[adev->devno] = timing;
- outb(timing, ld_qdi->timing + 2 * adev->devno);
/* Clear the FIFO */
- if (adev->class != ATA_DEV_ATA)
+ if (ld_qdi->type != QDI6500 && adev->class != ATA_DEV_ATA)
outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
}
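The merged helper keeps the shared timing computation: recovery clocks go in the high nibble, active clocks in the low bits, and bit 3 is always set. A small sketch of the packing, using made-up cycle counts and a local clamp in place of the kernel's clamp_val():

	/* compile with: cc demo.c */
	#include <stdio.h>

	static int clamp_val(int v, int lo, int hi)	/* local stand-in */
	{
		return v < lo ? lo : v > hi ? hi : v;
	}

	int main(void)
	{
		/* hypothetical cycle counts for a non-fast QDI6580 */
		int t_active = 6, t_recover = 10;
		int active   = 9 - clamp_val(t_active, 2, 9);
		int recovery = 15 - clamp_val(t_recover, 0, 15);
		unsigned char timing = (recovery << 4) | active | 0x08;
		/* high nibble = recovery, bit 3 set, low bits = active */
		printf("timing byte = 0x%02x\n", timing);	/* 0x5b here */
		return 0;
	}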
@@ -788,20 +741,20 @@ static int qdi_port(struct platform_device *dev,
static struct ata_port_operations qdi6500_port_ops = {
.inherits = &legacy_base_port_ops,
- .set_piomode = qdi6500_set_piomode,
+ .set_piomode = qdi65x0_set_piomode,
.qc_issue = qdi_qc_issue,
.sff_data_xfer = vlb32_data_xfer,
};
static struct ata_port_operations qdi6580_port_ops = {
.inherits = &legacy_base_port_ops,
- .set_piomode = qdi6580_set_piomode,
+ .set_piomode = qdi65x0_set_piomode,
.sff_data_xfer = vlb32_data_xfer,
};
static struct ata_port_operations qdi6580dp_port_ops = {
.inherits = &legacy_base_port_ops,
- .set_piomode = qdi6580dp_set_piomode,
+ .set_piomode = qdi65x0_set_piomode,
.qc_issue = qdi_qc_issue,
.sff_data_xfer = vlb32_data_xfer,
};
@@ -878,29 +831,29 @@ static struct ata_port_operations winbond_port_ops = {
};
static struct legacy_controller controllers[] = {
- {"BIOS", &legacy_port_ops, 0x1F,
+ {"BIOS", &legacy_port_ops, ATA_PIO4,
ATA_FLAG_NO_IORDY, 0, NULL },
- {"Snooping", &simple_port_ops, 0x1F,
+ {"Snooping", &simple_port_ops, ATA_PIO4,
0, 0, NULL },
- {"PDC20230", &pdc20230_port_ops, 0x7,
+ {"PDC20230", &pdc20230_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY,
ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, NULL },
- {"HT6560A", &ht6560a_port_ops, 0x07,
+ {"HT6560A", &ht6560a_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY, 0, NULL },
- {"HT6560B", &ht6560b_port_ops, 0x1F,
+ {"HT6560B", &ht6560b_port_ops, ATA_PIO4,
ATA_FLAG_NO_IORDY, 0, NULL },
- {"OPTI82C611A", &opti82c611a_port_ops, 0x0F,
+ {"OPTI82C611A", &opti82c611a_port_ops, ATA_PIO3,
0, 0, NULL },
- {"OPTI82C46X", &opti82c46x_port_ops, 0x0F,
+ {"OPTI82C46X", &opti82c46x_port_ops, ATA_PIO3,
0, 0, NULL },
- {"QDI6500", &qdi6500_port_ops, 0x07,
+ {"QDI6500", &qdi6500_port_ops, ATA_PIO2,
ATA_FLAG_NO_IORDY,
ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
- {"QDI6580", &qdi6580_port_ops, 0x1F,
+ {"QDI6580", &qdi6580_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
- {"QDI6580DP", &qdi6580dp_port_ops, 0x1F,
+ {"QDI6580DP", &qdi6580dp_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE, qdi_port },
- {"W83759A", &winbond_port_ops, 0x1F,
+ {"W83759A", &winbond_port_ops, ATA_PIO4,
0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,
winbond_port }
};
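The ATA_PIOn masks expand to exactly the hex bitmaps they replace, one bit per supported PIO mode. A quick check of that identity, assuming the usual (1 << (n + 1)) - 1 expansion:

	/* compile with: cc demo.c */
	#include <stdio.h>

	int main(void)
	{
		for (int n = 0; n <= 4; n++)
			printf("ATA_PIO%d = 0x%02x\n", n, (1 << (n + 1)) - 1);
		return 0;
	}

ATA_PIO2, ATA_PIO3 and ATA_PIO4 print 0x07, 0x0F and 0x1F, matching the constants removed above.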
@@ -1021,6 +974,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
if (!io_addr || !ctrl_addr)
goto fail;
+ ld->type = probe->type;
if (controller->setup)
if (controller->setup(pdev, probe, ld) < 0)
goto fail;
@@ -1305,6 +1259,7 @@ MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("pata_qdi");
MODULE_ALIAS("pata_winbond");
module_param(probe_all, int, 0);
diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c
index 46f589e..b057e3f 100644
--- a/drivers/ata/pata_macio.c
+++ b/drivers/ata/pata_macio.c
@@ -772,8 +772,9 @@ static void pata_macio_reset_hw(struct pata_macio_priv *priv, int resume)
pci_restore_state(priv->pdev);
rc = pcim_enable_device(priv->pdev);
if (rc)
- dev_printk(KERN_ERR, &priv->pdev->dev,
- "Failed to enable device after resume (%d)\n", rc);
+ dev_err(&priv->pdev->dev,
+ "Failed to enable device after resume (%d)\n",
+ rc);
else
pci_set_master(priv->pdev);
}
@@ -812,7 +813,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
blk_queue_update_dma_pad(sdev->request_queue, 31);
/* Tell the world about it */
- ata_dev_printk(dev, KERN_INFO, "OHare alignment limits applied\n");
+ ata_dev_info(dev, "OHare alignment limits applied\n");
return 0;
}
@@ -838,8 +839,7 @@ static int pata_macio_slave_config(struct scsi_device *sdev)
cmd | PCI_COMMAND_INVALIDATE);
/* Tell the world about it */
- ata_dev_printk(dev, KERN_INFO,
- "K2/Shasta alignment limits applied\n");
+ ata_dev_info(dev, "K2/Shasta alignment limits applied\n");
}
return 0;
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 2fcac51..3e17463 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -780,7 +780,7 @@ mpc52xx_ata_probe(struct platform_device *op)
}
task_irq = bcom_get_task_irq(dmatsk);
- ret = request_irq(task_irq, &mpc52xx_ata_task_irq, IRQF_DISABLED,
+ ret = request_irq(task_irq, &mpc52xx_ata_task_irq, 0,
"ATA task", priv);
if (ret) {
dev_err(&op->dev, "error requesting DMA IRQ\n");
diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
index d8d9c58..9dc16df 100644
--- a/drivers/ata/pata_mpiix.c
+++ b/drivers/ata/pata_mpiix.c
@@ -152,15 +152,13 @@ static struct ata_port_operations mpiix_port_ops = {
static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Single threaded by the PCI probe logic */
- static int printed_version;
struct ata_host *host;
struct ata_port *ap;
void __iomem *cmd_addr, *ctl_addr;
u16 idetim;
int cmd, ctl, irq;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
host = ata_host_alloc(&dev->dev, 1);
if (!host)
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index 3eb921c..9979a43 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -57,7 +57,6 @@ static struct ata_port_operations netcell_ops = {
static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
/* Actually we don't really care about these as the
@@ -70,9 +69,7 @@ static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *e
const struct ata_port_info *port_info[] = { &info, NULL };
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
index 2110863..31d5986 100644
--- a/drivers/ata/pata_ns87410.c
+++ b/drivers/ata/pata_ns87410.c
@@ -86,7 +86,7 @@ static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
idefr &= ~0x04;
if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
- dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
+ dev_err(&pdev->dev, "unknown mode %d\n", adev->pio_mode);
return;
}
diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
index 605f198..f1d517b 100644
--- a/drivers/ata/pata_ns87415.c
+++ b/drivers/ata/pata_ns87415.c
@@ -350,7 +350,6 @@ static void ns87415_fixup(struct pci_dev *pdev)
static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -370,9 +369,7 @@ static int ns87415_init_one (struct pci_dev *pdev, const struct pci_device_id *e
if (PCI_SLOT(pdev->devfn) == 0x0E)
ppi[0] = &info87560;
#endif
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 220ddc9..1d61d5d 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -405,7 +405,7 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
rc = ata_sff_wait_after_reset(link, 1, deadline);
if (rc) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
@@ -807,6 +807,7 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
irq_handler_t irq_handler = NULL;
void __iomem *base;
struct octeon_cf_port *cf_port;
+ char version[32];
res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -905,10 +906,11 @@ static int __devinit octeon_cf_probe(struct platform_device *pdev)
ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);
- dev_info(&pdev->dev, "version " DRV_VERSION" %d bit%s.\n",
+ snprintf(version, sizeof(version), "%s %d bit%s",
+ DRV_VERSION,
(ocd->is16bit) ? 16 : 8,
(cs1) ? ", True IDE" : "");
-
+ ata_print_version_once(&pdev->dev, version);
return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index f305400..2a472c5 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -11,6 +11,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/ata_platform.h>
@@ -50,18 +52,18 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
}
ret = of_irq_to_resource(dn, 0, &irq_res);
- if (ret == NO_IRQ)
+ if (!ret)
irq_res.start = irq_res.end = 0;
else
irq_res.flags = 0;
prop = of_get_property(dn, "reg-shift", NULL);
if (prop)
- reg_shift = *prop;
+ reg_shift = be32_to_cpup(prop);
prop = of_get_property(dn, "pio-mode", NULL);
if (prop) {
- pio_mode = *prop;
+ pio_mode = be32_to_cpup(prop);
if (pio_mode > 6) {
dev_err(&ofdev->dev, "invalid pio-mode\n");
return -EINVAL;
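reg-shift and pio-mode are single-cell device-tree properties and cells are stored big-endian, so dereferencing the raw pointer only happened to work on big-endian hosts; be32_to_cpup() makes the read endian-safe. A userspace illustration of the difference, using ntohl() as a stand-in for the same byte swap:

	/* compile with: cc demo.c */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>	/* ntohl(): the swap be32_to_cpup() performs */

	int main(void)
	{
		/* a one-cell DT property in memory: big-endian 32-bit 4 */
		uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x04 };
		uint32_t cell;
		memcpy(&cell, raw, sizeof(cell));
		printf("raw dereference: %u\n", (unsigned)cell);	/* 4 on BE, 67108864 on LE */
		printf("byte-swapped   : %u\n", (unsigned)ntohl(cell));	/* 4 on either */
		return 0;
	}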
diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
index b811c16..98cdf50 100644
--- a/drivers/ata/pata_oldpiix.c
+++ b/drivers/ata/pata_oldpiix.c
@@ -235,7 +235,6 @@ static struct ata_port_operations oldpiix_pata_ops = {
static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -244,9 +243,7 @@ static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *e
};
const struct ata_port_info *ppi[] = { &info, NULL };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &oldpiix_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
index 00c5a02..accc033 100644
--- a/drivers/ata/pata_opti.c
+++ b/drivers/ata/pata_opti.c
@@ -167,10 +167,8 @@ static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &opti_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- static int printed_version;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_sff_init_one(dev, ppi, &opti_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index 0852cd0..77cb914 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -411,11 +411,9 @@ static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &optiplus_port_ops
};
const struct ata_port_info *ppi[] = { &info_82c700, NULL };
- static int printed_version;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
rc = pcim_enable_device(dev);
if (rc)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 021abe6..a808ba0 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -68,7 +68,7 @@ static int pcmcia_set_mode(struct ata_link *link, struct ata_device **r_failed_d
the same vendor - check serial */
if (memcmp(master->id + ATA_ID_SERNO, slave->id + ATA_ID_SERNO,
ATA_ID_SERNO_LEN) == 0 && master->id[ATA_ID_SERNO] >> 8) {
- ata_dev_printk(slave, KERN_WARNING, "is a ghost device, ignoring.\n");
+ ata_dev_warn(slave, "is a ghost device, ignoring\n");
ata_dev_disable(slave);
}
}
@@ -142,8 +142,7 @@ static void pcmcia_8bit_drain_fifo(struct ata_queued_cmd *qc)
ioread8(ap->ioaddr.data_addr);
if (count)
- ata_port_printk(ap, KERN_WARNING, "drained %d bytes to clear DRQ.\n",
- count);
+ ata_port_warn(ap, "drained %d bytes to clear DRQ\n", count);
}
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index 9765ace..7d63f24 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -63,6 +63,7 @@ enum {
};
static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int pdc2027x_reinit_one(struct pci_dev *pdev);
static int pdc2027x_prereset(struct ata_link *link, unsigned long deadline);
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
@@ -126,6 +127,10 @@ static struct pci_driver pdc2027x_pci_driver = {
.id_table = pdc2027x_pci_tbl,
.probe = pdc2027x_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = pdc2027x_reinit_one,
+#endif
};
static struct scsi_host_template pdc2027x_sht = {
@@ -655,7 +660,7 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx)
*/
pll_clock = pdc_detect_pll_input_clock(host);
- dev_printk(KERN_INFO, host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
+ dev_info(host->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
/* Adjust PLL control register */
pdc_adjust_pll(host, pll_clock, board_idx);
@@ -697,7 +702,6 @@ static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
*/
static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const unsigned long cmd_offset[] = { 0x17c0, 0x15c0 };
static const unsigned long bmdma_offset[] = { 0x1000, 0x1008 };
unsigned int board_idx = (unsigned int) ent->driver_data;
@@ -707,8 +711,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
void __iomem *mmio_base;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
@@ -756,6 +759,31 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
IRQF_SHARED, &pdc2027x_sht);
}
+#ifdef CONFIG_PM
+static int pdc2027x_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ unsigned int board_idx;
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ if (pdev->device == PCI_DEVICE_ID_PROMISE_20268 ||
+ pdev->device == PCI_DEVICE_ID_PROMISE_20270)
+ board_idx = PDC_UDMA_100;
+ else
+ board_idx = PDC_UDMA_133;
+
+ if (pdc_hardware_init(host, board_idx))
+ return -EIO;
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
/**
* pdc2027x_init - Called after this module is loaded into the kernel.
*/
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 50400fa..2067308 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -39,7 +39,7 @@ static int pata_platform_set_mode(struct ata_link *link, struct ata_device **unu
dev->pio_mode = dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
deleted file mode 100644
index 45879dc..0000000
--- a/drivers/ata/pata_qdi.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * pata_qdi.c - QDI VLB ATA controllers
- * (C) 2006 Red Hat
- *
- * This driver mostly exists as a proof of concept for non PCI devices under
- * libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
- * useful.
- *
- * Tuning code written from the documentation at
- * http://www.ryston.cz/petr/vlb/qd6500.html
- * http://www.ryston.cz/petr/vlb/qd6580.html
- *
- * Probe code based on drivers/ide/legacy/qd65xx.c
- * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
- * Samuel Thibault <samuel.thibault@ens-lyon.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-
-#define DRV_NAME "pata_qdi"
-#define DRV_VERSION "0.3.1"
-
-#define NR_HOST 4 /* Two 6580s */
-
-struct qdi_data {
- unsigned long timing;
- u8 clock[2];
- u8 last;
- int fast;
- struct platform_device *platform_dev;
-
-};
-
-static struct ata_host *qdi_host[NR_HOST];
-static struct qdi_data qdi_data[NR_HOST];
-static int nr_qdi_host;
-
-#ifdef MODULE
-static int probe_qdi = 1;
-#else
-static int probe_qdi;
-#endif
-
-static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct qdi_data *qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- if (qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
-
- qdi->clock[adev->devno] = timing;
-
- outb(timing, qdi->timing);
-}
-
-static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct ata_timing t;
- struct qdi_data *qdi = ap->host->private_data;
- int active, recovery;
- u8 timing;
-
- /* Get the timing data in cycles */
- ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
-
- if (qdi->fast) {
- active = 8 - clamp_val(t.active, 1, 8);
- recovery = 18 - clamp_val(t.recover, 3, 18);
- } else {
- active = 9 - clamp_val(t.active, 2, 9);
- recovery = 15 - clamp_val(t.recover, 0, 15);
- }
- timing = (recovery << 4) | active | 0x08;
-
- qdi->clock[adev->devno] = timing;
-
- outb(timing, qdi->timing);
-
- /* Clear the FIFO */
- if (adev->class != ATA_DEV_ATA)
- outb(0x5F, (qdi->timing & 0xFFF0) + 3);
-}
-
-/**
- * qdi_qc_issue - command issue
- * @qc: command pending
- *
- * Called when the libata layer is about to issue a command. We wrap
- * this interface so that we can load the correct ATA timings.
- */
-
-static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ata_device *adev = qc->dev;
- struct qdi_data *qdi = ap->host->private_data;
-
- if (qdi->clock[adev->devno] != qdi->last) {
- if (adev->pio_mode) {
- qdi->last = qdi->clock[adev->devno];
- outb(qdi->clock[adev->devno], qdi->timing);
- }
- }
- return ata_sff_qc_issue(qc);
-}
-
-static unsigned int qdi_data_xfer(struct ata_device *dev, unsigned char *buf,
- unsigned int buflen, int rw)
-{
- if (ata_id_has_dword_io(dev->id)) {
- struct ata_port *ap = dev->link->ap;
- int slop = buflen & 3;
-
- if (rw == READ)
- ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
- else
- iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
-
- if (unlikely(slop)) {
- __le32 pad;
- if (rw == READ) {
- pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
- memcpy(buf + buflen - slop, &pad, slop);
- } else {
- memcpy(&pad, buf + buflen - slop, slop);
- iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
- }
- buflen += 4 - slop;
- }
- } else
- buflen = ata_sff_data_xfer(dev, buf, buflen, rw);
-
- return buflen;
-}
-
-static struct scsi_host_template qdi_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations qdi6500_port_ops = {
- .inherits = &ata_sff_port_ops,
- .qc_issue = qdi_qc_issue,
- .sff_data_xfer = qdi_data_xfer,
- .cable_detect = ata_cable_40wire,
- .set_piomode = qdi6500_set_piomode,
-};
-
-static struct ata_port_operations qdi6580_port_ops = {
- .inherits = &qdi6500_port_ops,
- .set_piomode = qdi6580_set_piomode,
-};
-
-/**
- * qdi_init_one - attach a qdi interface
- * @type: Type to display
- * @io: I/O port start
- * @irq: interrupt line
- * @fast: True if on a > 33Mhz VLB
- *
- * Register an ISA bus IDE interface. Such interfaces are PIO and we
- * assume do not support IRQ sharing.
- */
-
-static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
-{
- unsigned long ctl = io + 0x206;
- struct platform_device *pdev;
- struct ata_host *host;
- struct ata_port *ap;
- void __iomem *io_addr, *ctl_addr;
- int ret;
-
- /*
- * Fill in a probe structure first of all
- */
-
- pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- ret = -ENOMEM;
- io_addr = devm_ioport_map(&pdev->dev, io, 8);
- ctl_addr = devm_ioport_map(&pdev->dev, ctl, 1);
- if (!io_addr || !ctl_addr)
- goto fail;
-
- ret = -ENOMEM;
- host = ata_host_alloc(&pdev->dev, 1);
- if (!host)
- goto fail;
- ap = host->ports[0];
-
- if (type == 6580) {
- ap->ops = &qdi6580_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_SLAVE_POSS;
- } else {
- ap->ops = &qdi6500_port_ops;
- ap->pio_mask = ATA_PIO2; /* Actually PIO3 !IORDY is possible */
- ap->flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_IORDY;
- }
-
- ap->ioaddr.cmd_addr = io_addr;
- ap->ioaddr.altstatus_addr = ctl_addr;
- ap->ioaddr.ctl_addr = ctl_addr;
- ata_sff_std_ports(&ap->ioaddr);
-
- ata_port_desc(ap, "cmd %lx ctl %lx", io, ctl);
-
- /*
- * Hook in a private data structure per channel
- */
- ap->private_data = &qdi_data[nr_qdi_host];
-
- qdi_data[nr_qdi_host].timing = port;
- qdi_data[nr_qdi_host].fast = fast;
- qdi_data[nr_qdi_host].platform_dev = pdev;
-
- printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
-
- /* activate */
- ret = ata_host_activate(host, irq, ata_sff_interrupt, 0, &qdi_sht);
- if (ret)
- goto fail;
-
- qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
- return 0;
-
- fail:
- platform_device_unregister(pdev);
- return ret;
-}
-
-/**
- * qdi_init - attach qdi interfaces
- *
- * Attach qdi IDE interfaces by scanning the ports it may occupy.
- */
-
-static __init int qdi_init(void)
-{
- unsigned long flags;
- static const unsigned long qd_port[2] = { 0x30, 0xB0 };
- static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
- static const int ide_irq[2] = { 14, 15 };
-
- int ct = 0;
- int i;
-
- if (probe_qdi == 0)
- return -ENODEV;
-
- /*
- * Check each possible QD65xx base address
- */
-
- for (i = 0; i < 2; i++) {
- unsigned long port = qd_port[i];
- u8 r, res;
-
-
- if (request_region(port, 2, "pata_qdi")) {
- /* Check for a card */
- local_irq_save(flags);
- r = inb_p(port);
- outb_p(0x19, port);
- res = inb_p(port);
- outb_p(r, port);
- local_irq_restore(flags);
-
- /* Fail */
- if (res == 0x19)
- {
- release_region(port, 2);
- continue;
- }
-
- /* Passes the presence test */
- r = inb_p(port + 1); /* Check port agrees with port set */
- if ((r & 2) >> 1 != i) {
- release_region(port, 2);
- continue;
- }
-
- /* Check card type */
- if ((r & 0xF0) == 0xC0) {
- /* QD6500: single channel */
- if (r & 8) {
- /* Disabled ? */
- release_region(port, 2);
- continue;
- }
- if (qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
- ct++;
- }
- if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
- /* QD6580: dual channel */
- if (!request_region(port + 2 , 2, "pata_qdi"))
- {
- release_region(port, 2);
- continue;
- }
- res = inb(port + 3);
- if (res & 1) {
- /* Single channel mode */
- if (qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04) == 0)
- ct++;
- } else {
- /* Dual channel mode */
- if (qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04) == 0)
- ct++;
- if (qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04) == 0)
- ct++;
- }
- }
- }
- }
- if (ct != 0)
- return 0;
- return -ENODEV;
-}
-
-static __exit void qdi_exit(void)
-{
- int i;
-
- for (i = 0; i < nr_qdi_host; i++) {
- ata_host_detach(qdi_host[i]);
- /* Free the control resource. The 6580 dual channel has the resources
- * claimed as a pair of 2 byte resources so we need no special cases...
- */
- release_region(qdi_data[i].timing, 2);
- platform_device_unregister(qdi_data[i].platform_dev);
- }
-}
-
-MODULE_AUTHOR("Alan Cox");
-MODULE_DESCRIPTION("low-level driver for qdi ATA");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
-
-module_init(qdi_init);
-module_exit(qdi_exit);
-
-module_param(probe_qdi, int, 0);
-
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 8574b31..b2d3a2b 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -213,7 +213,6 @@ static struct ata_port_operations radisys_pata_ops = {
static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
static const struct ata_port_info info = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
@@ -223,9 +222,7 @@ static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *e
};
const struct ata_port_info *ppi[] = { &info, NULL };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &radisys_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
index 5fbe9b1..e6a2dd7 100644
--- a/drivers/ata/pata_rdc.c
+++ b/drivers/ata/pata_rdc.c
@@ -86,6 +86,8 @@ static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
+static DEFINE_SPINLOCK(rdc_lock);
+
/**
* rdc_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
@@ -101,6 +103,7 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned long flags;
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
@@ -124,6 +127,8 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
+ spin_lock_irqsave(&rdc_lock, flags);
+
/* PIO configuration clears DTE unconditionally. It will be
* programmed in set_dmamode which is guaranteed to be called
* after set_piomode if any DMA mode is available.
@@ -161,6 +166,8 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&rdc_lock, flags);
}
/**
@@ -177,6 +184,7 @@ static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
+ unsigned long flags;
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
@@ -190,6 +198,8 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{ 2, 1 },
{ 2, 3 }, };
+ spin_lock_irqsave(&rdc_lock, flags);
+
pci_read_config_word(dev, master_port, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
@@ -271,6 +281,8 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
+
+ spin_unlock_irqrestore(&rdc_lock, flags);
}
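Both ports' timing callbacks read-modify-write shared config registers (the UDMA enable byte at 0x48 in particular), which is what the new rdc_lock serializes. A userspace sketch of the hazard and the fix, with a pthread mutex standing in for the spinlock and a plain variable for the register:

	/* compile with: cc -pthread demo.c */
	#include <pthread.h>
	#include <stdio.h>

	static unsigned int udma_enable;	/* stands in for config reg 0x48 */
	static pthread_mutex_t rdc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *port_thread(void *arg)
	{
		unsigned int bit = 1u << *(int *)arg;
		for (int i = 0; i < 1000000; i++) {
			pthread_mutex_lock(&rdc_lock);	/* spin_lock_irqsave() */
			unsigned int v = udma_enable;	/* pci_read_config_byte() */
			udma_enable = v | bit;		/* pci_write_config_byte() */
			v = udma_enable;
			udma_enable = v & ~bit;
			pthread_mutex_unlock(&rdc_lock);	/* spin_unlock_irqrestore() */
		}
		return NULL;
	}

	int main(void)
	{
		int bits[2] = { 0, 1 };
		pthread_t t0, t1;
		pthread_create(&t0, NULL, port_thread, &bits[0]);
		pthread_create(&t1, NULL, port_thread, &bits[1]);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		/* with the mutex this always prints 0x0; without it,
		 * interleaved RMW sequences can leave stray bits set */
		printf("final register: 0x%x\n", udma_enable);
		return 0;
	}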
static struct ata_port_operations rdc_pata_ops = {
@@ -312,7 +324,6 @@ static struct scsi_host_template rdc_sht = {
static int __devinit rdc_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
struct device *dev = &pdev->dev;
struct ata_port_info port_info[2];
const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
@@ -321,9 +332,7 @@ static int __devinit rdc_init_one(struct pci_dev *pdev,
struct rdc_host_priv *hpriv;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
port_info[0] = rdc_port_info;
port_info[1] = rdc_port_info;
@@ -378,6 +387,10 @@ static struct pci_driver rdc_pci_driver = {
.id_table = rdc_pci_tbl,
.probe = rdc_init_one,
.remove = rdc_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
};
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 4d04471..aca321e 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -44,7 +44,7 @@ static int rz1000_set_mode(struct ata_link *link, struct ata_device **unused)
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
- ata_dev_printk(dev, KERN_INFO, "configured for PIO\n");
+ ata_dev_info(dev, "configured for PIO\n");
}
return 0;
}
@@ -92,7 +92,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
};
const struct ata_port_info *ppi[] = { &info, NULL };
- printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0);
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index c446ae6..1b372c2 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -376,7 +376,7 @@ static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
rc = pata_s3c_bus_softreset(ap, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && rc != -ENODEV) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
return rc;
}
diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
index e2c1825..c0e603a 100644
--- a/drivers/ata/pata_sc1200.c
+++ b/drivers/ata/pata_sc1200.c
@@ -38,7 +38,7 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
-#define DRV_NAME "sc1200"
+#define DRV_NAME "pata_sc1200"
#define DRV_VERSION "0.2.6"
#define SC1200_REV_A 0x00
@@ -86,10 +86,14 @@ static int sc1200_clock(void)
static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 pio_timings[4][5] = {
- {0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0 33Mhz
- {0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33Mhz
- {0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48Mhz
- {0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131} // format1, 66Mhz
+ /* format0, 33MHz */
+ { 0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010 },
+ /* format1, 33MHz */
+ { 0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010 },
+ /* format1, 48MHz */
+ { 0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021 },
+ /* format1, 66MHz */
+ { 0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index 88ea9b6..19759d3 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -586,7 +586,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
* Note: Original code is ata_bus_softreset().
*/
-static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
+static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
@@ -600,9 +600,7 @@ static unsigned int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
udelay(20);
out_be32(ioaddr->ctl_addr, ap->ctl);
- scc_wait_after_reset(&ap->link, devmask, deadline);
-
- return 0;
+ return scc_wait_after_reset(&ap->link, devmask, deadline);
}
/**
@@ -619,7 +617,8 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
{
struct ata_port *ap = link->ap;
unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- unsigned int devmask = 0, err_mask;
+ unsigned int devmask = 0;
+ int rc;
u8 err;
DPRINTK("ENTER\n");
@@ -635,10 +634,9 @@ static int scc_softreset(struct ata_link *link, unsigned int *classes,
/* issue bus reset */
DPRINTK("about to softreset, devmask=%x\n", devmask);
- err_mask = scc_bus_softreset(ap, devmask, deadline);
- if (err_mask) {
- ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
- err_mask);
+ rc = scc_bus_softreset(ap, devmask, deadline);
+ if (rc) {
+ ata_port_err(ap, "SRST failed (errno=%d)\n", rc);
return -EIO;
}
@@ -827,18 +825,6 @@ static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
}
/**
- * scc_pata_prereset - prepare for reset
- * @ap: ATA port to be reset
- * @deadline: deadline jiffies for the operation
- */
-
-static int scc_pata_prereset(struct ata_link *link, unsigned long deadline)
-{
- link->ap->cbl = ATA_CBL_PATA80;
- return ata_sff_prereset(link, deadline);
-}
-
-/**
* scc_postreset - standard postreset callback
* @ap: the target ata_port
* @classes: classes of attached devices
@@ -947,7 +933,7 @@ static struct ata_port_operations scc_pata_ops = {
.bmdma_status = scc_bmdma_status,
.sff_data_xfer = scc_data_xfer,
- .prereset = scc_pata_prereset,
+ .cable_detect = ata_cable_80wire,
.softreset = scc_softreset,
.postreset = scc_postreset,
@@ -1072,15 +1058,12 @@ static int scc_host_init(struct ata_host *host)
static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
struct ata_host *host;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
if (!host)
diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
index e97b32f..7c78b99 100644
--- a/drivers/ata/pata_sch.c
+++ b/drivers/ata/pata_sch.c
@@ -172,12 +172,9 @@ static void sch_set_dmamode(struct ata_port *ap, struct ata_device *adev)
static int __devinit sch_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { &sch_port_info, NULL };
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(pdev, ppi, &sch_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index 86dd714..5929dde 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -58,31 +58,15 @@ static const char *csb_bad_ata100[] = {
};
/**
- * dell_cable - Dell serverworks cable detection
+ * oem_cable - Dell/Sun serverworks cable detection
* @ap: ATA port to do cable detect
*
- * Dell hide the 40/80 pin select for their interfaces in the top two
- * bits of the subsystem ID.
+ * Dell PowerEdge and Sun Cobalt 'Alpine' hide the 40/80 pin select
+ * for their interfaces in the top two bits of the subsystem ID.
*/
-static int dell_cable(struct ata_port *ap) {
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
-
- if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
- return ATA_CBL_PATA80;
- return ATA_CBL_PATA40;
-}
-
-/**
- * sun_cable - Sun Cobalt 'Alpine' cable detection
- * @ap: ATA port to do cable select
- *
- * Cobalt CSB5 IDE hides the 40/80pin in the top two bits of the
- * subsystem ID the same as dell. We could use one function but we may
- * need to extend the Dell one in future
- */
-
-static int sun_cable(struct ata_port *ap) {
+static int oem_cable(struct ata_port *ap)
+{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
@@ -90,49 +74,21 @@ static int sun_cable(struct ata_port *ap) {
return ATA_CBL_PATA40;
}
-/**
- * osb4_cable - OSB4 cable detect
- * @ap: ATA port to check
- *
- * The OSB4 isn't UDMA66 capable so this is easy
- */
-
-static int osb4_cable(struct ata_port *ap) {
- return ATA_CBL_PATA40;
-}
-
-/**
- * csb_cable - CSB5/6 cable detect
- * @ap: ATA port to check
- *
- * Serverworks default arrangement is to use the drive side detection
- * only.
- */
-
-static int csb_cable(struct ata_port *ap) {
- return ATA_CBL_PATA_UNK;
-}
-
struct sv_cable_table {
int device;
int subvendor;
int (*cable_detect)(struct ata_port *ap);
};
-/*
- * Note that we don't copy the old serverworks code because the old
- * code contains obvious mistakes
- */
-
static struct sv_cable_table cable_detect[] = {
- { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
- { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, osb4_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
- { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
- { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, oem_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, oem_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, oem_cable },
+ { PCI_DEVICE_ID_SERVERWORKS_OSB4IDE, PCI_ANY_ID, ata_cable_40wire },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, ata_cable_unknown },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, ata_cable_unknown },
+ { PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, ata_cable_unknown },
+ { PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, ata_cable_unknown },
{ }
};
@@ -296,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
pci_write_config_byte(pdev, 0x54, ultra_cfg);
}
-static struct scsi_host_template serverworks_sht = {
+static struct scsi_host_template serverworks_osb4_sht = {
+ ATA_BMDMA_SHT(DRV_NAME),
+ .sg_tablesize = LIBATA_DUMB_MAX_PRD,
+};
+
+static struct scsi_host_template serverworks_csb_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
static struct ata_port_operations serverworks_osb4_port_ops = {
.inherits = &ata_bmdma_port_ops,
+ .qc_prep = ata_bmdma_dumb_qc_prep,
.cable_detect = serverworks_cable_detect,
.mode_filter = serverworks_osb4_filter,
.set_piomode = serverworks_set_piomode,
@@ -310,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
static struct ata_port_operations serverworks_csb_port_ops = {
.inherits = &serverworks_osb4_port_ops,
+ .qc_prep = ata_bmdma_qc_prep,
.mode_filter = serverworks_csb_filter,
};
@@ -393,6 +356,31 @@ static void serverworks_fixup_ht1000(struct pci_dev *pdev)
pci_write_config_byte(pdev, 0x5A, btr);
}
+static int serverworks_fixup(struct pci_dev *pdev)
+{
+ int rc = 0;
+
+ /* Force master latency timer to 64 PCI clocks */
+ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
+ rc = serverworks_fixup_osb4(pdev);
+ break;
+ case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
+ ata_pci_bmdma_clear_simplex(pdev);
+ /* fall through */
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
+ case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
+ rc = serverworks_fixup_csb(pdev);
+ break;
+ case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
+ serverworks_fixup_ht1000(pdev);
+ break;
+ }
+
+ return rc;
+}
static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
@@ -424,20 +412,21 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
}
};
const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
+ struct scsi_host_template *sht = &serverworks_csb_sht;
int rc;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- /* Force master latency timer to 64 PCI clocks */
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+ rc = serverworks_fixup(pdev);
/* OSB4 : South Bridge and IDE */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
/* Select non UDMA capable OSB4 if we can't do fixups */
- if ( serverworks_fixup_osb4(pdev) < 0)
+ if (rc < 0)
ppi[0] = &info[1];
+ sht = &serverworks_osb4_sht;
}
/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
@@ -446,21 +435,15 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
/* If the returned btr is the newer revision then
select the right info block */
- if (serverworks_fixup_csb(pdev) == 3)
+ if (rc == 3)
ppi[0] = &info[3];
/* Is this the 3rd channel CSB6 IDE ? */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
ppi[1] = &ata_dummy_port_info;
}
- /* setup HT1000E */
- else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
- serverworks_fixup_ht1000(pdev);
- if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
- ata_pci_bmdma_clear_simplex(pdev);
-
- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
+ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
}
#ifdef CONFIG_PM
@@ -473,24 +456,7 @@ static int serverworks_reinit_one(struct pci_dev *pdev)
if (rc)
return rc;
- /* Force master latency timer to 64 PCI clocks */
- pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
-
- switch (pdev->device) {
- case PCI_DEVICE_ID_SERVERWORKS_OSB4IDE:
- serverworks_fixup_osb4(pdev);
- break;
- case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
- ata_pci_bmdma_clear_simplex(pdev);
- /* fall through */
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
- case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
- serverworks_fixup_csb(pdev);
- break;
- case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
- serverworks_fixup_ht1000(pdev);
- break;
- }
+ (void)serverworks_fixup(pdev);
ata_host_resume(host);
return 0;
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 118787c..b92eacf 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -38,11 +38,12 @@
/**
* sil680_selreg - return register base
- * @hwif: interface
+ * @ap: ATA interface
* @r: config offset
*
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
+ * Turn a config register offset into the right address in PCI space
+ * to access the control register in question.
+ *
* Thankfully this is a configuration operation so isn't performance
* critical.
*/
@@ -56,12 +57,12 @@ static unsigned long sil680_selreg(struct ata_port *ap, int r)
/**
* sil680_seldev - return register base
- * @hwif: interface
+ * @ap: ATA interface
* @r: config offset
*
- * Turn a config register offset into the right address in either
- * PCI space or MMIO space to access the control register in question
- * including accounting for the unit shift.
+ * Turn a config register offset into the right address in PCI space
+ * to access the control register in question including accounting for
+ * the unit shift.
*/
static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
@@ -81,7 +82,8 @@ static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev,
* space for us.
*/
-static int sil680_cable_detect(struct ata_port *ap) {
+static int sil680_cable_detect(struct ata_port *ap)
+{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long addr = sil680_selreg(ap, 0);
u8 ata66;
@@ -93,7 +95,7 @@ static int sil680_cable_detect(struct ata_port *ap) {
}
/**
- * sil680_set_piomode - set initial PIO mode data
+ * sil680_set_piomode - set PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
@@ -104,8 +106,12 @@ static int sil680_cable_detect(struct ata_port *ap) {
static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
- static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
- static u16 speed_t[5] = { 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1 };
+ static const u16 speed_p[5] = {
+ 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1
+ };
+ static const u16 speed_t[5] = {
+ 0x328A, 0x2283, 0x1281, 0x10C3, 0x10C1
+ };
unsigned long tfaddr = sil680_selreg(ap, 0x02);
unsigned long addr = sil680_seldev(ap, adev, 0x04);
@@ -140,22 +146,23 @@ static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
}
/**
- * sil680_set_dmamode - set initial DMA mode data
+ * sil680_set_dmamode - set DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
- * Program the MWDMA/UDMA modes for the sil680 k
- * chipset. The MWDMA mode values are pulled from a lookup table
+ * Program the MWDMA/UDMA modes for the sil680 chipset.
+ *
+ * The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- static u8 ultra_table[2][7] = {
+ static const u8 ultra_table[2][7] = {
{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133MHz */
};
- static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
+ static const u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long ma = sil680_seldev(ap, adev, 0x08);
@@ -175,7 +182,7 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
mode &= ~(0x03 << port_shift);
/* Extract scsc */
- scsc = (scsc & 0x30) ? 1: 0;
+ scsc = (scsc & 0x30) ? 1 : 0;
if (adev->dma_mode >= XFER_UDMA_0) {
multi = 0x10C1;
@@ -248,7 +255,7 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
{
u8 tmpbyte = 0;
- /* FIXME: double check */
+ /* FIXME: double check */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
pdev->revision ? 1 : 255);
@@ -266,22 +273,22 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
*try_mmio = (tmpbyte & 1) || pci_resource_start(pdev, 5);
#endif
- switch(tmpbyte & 0x30) {
- case 0x00:
- /* 133 clock attempt to force it on */
- pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
- break;
- case 0x30:
- /* if clocking is disabled */
- /* 133 clock attempt to force it on */
- pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
- break;
- case 0x10:
- /* 133 already */
- break;
- case 0x20:
- /* BIOS set PCI x2 clocking */
- break;
+ switch (tmpbyte & 0x30) {
+ case 0x00:
+ /* 133 clock attempt to force it on */
+ pci_write_config_byte(pdev, 0x8A, tmpbyte | 0x10);
+ break;
+ case 0x30:
+ /* if clocking is disabled */
+ /* 133 clock attempt to force it on */
+ pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
+ break;
+ case 0x10:
+ /* 133 already */
+ break;
+ case 0x20:
+ /* BIOS set PCI x2 clocking */
+ break;
}
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
@@ -299,12 +306,19 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
pci_write_config_dword(pdev, 0xB8, 0x43924392);
pci_write_config_dword(pdev, 0xBC, 0x40094009);
- switch(tmpbyte & 0x30) {
- case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break;
- case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break;
- case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
- /* This last case is _NOT_ ok */
- case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
+ switch (tmpbyte & 0x30) {
+ case 0x00:
+ printk(KERN_INFO "sil680: 100MHz clock.\n");
+ break;
+ case 0x10:
+ printk(KERN_INFO "sil680: 133MHz clock.\n");
+ break;
+ case 0x20:
+ printk(KERN_INFO "sil680: Using PCI clock.\n");
+ break;
+ /* This last case is _NOT_ ok */
+ case 0x30:
+ printk(KERN_ERR "sil680: Clock disabled ?\n");
}
return tmpbyte & 0x30;
}
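Both switch statements above decode bits 5:4 of config register 0x8A; an illustrative decode helper (not part of this patch) makes the four clock states explicit:

/* Illustrative decode of SIL680 config reg 0x8A, value & 0x30. */
static const char *sil680_clock_name(u8 tmpbyte)
{
	switch (tmpbyte & 0x30) {
	case 0x00: return "100MHz clock";
	case 0x10: return "133MHz clock";
	case 0x20: return "PCI x2 clocking (BIOS)";
	case 0x30: return "clock disabled";	/* the _NOT_ ok state */
	}
	return "";	/* unreachable; keeps the compiler quiet */
}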
@@ -327,13 +341,11 @@ static int __devinit sil680_init_one(struct pci_dev *pdev,
.port_ops = &sil680_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- static int printed_version;
struct ata_host *host;
void __iomem *mmio_base;
int rc, try_mmio;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
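The printed_version removals in this and the following drivers all lean on ata_print_version_once(); a plausible sketch of the once-only semantics it factors out (the real libata helper may differ in detail):

/* Sketch: print the driver version a single time per call site. */
#define print_version_once_sketch(dev, version)		\
({								\
	static bool __printed;					\
	if (!__printed) {					\
		__printed = true;				\
		dev_dbg(dev, "version %s\n", version);		\
	}							\
})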
diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
index be08ff9..b0edc7d 100644
--- a/drivers/ata/pata_sis.c
+++ b/drivers/ata/pata_sis.c
@@ -55,7 +55,7 @@ static const struct sis_laptop sis_laptop[] = {
/* devid, subvendor, subdev */
{ 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
{ 0x5513, 0x1734, 0x105F }, /* FSC Amilo A1630 */
- { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
+ { 0x5513, 0x1071, 0x8640 }, /* EasyNote K5305 */
/* end marker */
{ 0, }
};
@@ -76,7 +76,7 @@ static int sis_short_ata40(struct pci_dev *dev)
}
/**
- * sis_old_port_base - return PCI configuration base for dev
+ * sis_old_port_base - return PCI configuration base for dev
* @adev: device
*
* Returns the base of the PCI configuration registers for this port
@@ -85,11 +85,34 @@ static int sis_short_ata40(struct pci_dev *dev)
static int sis_old_port_base(struct ata_device *adev)
{
- return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
+ return 0x40 + (4 * adev->link->ap->port_no) + (2 * adev->devno);
}
/**
- * sis_133_cable_detect - check for 40/80 pin
+ * sis_port_base - return PCI configuration base for dev
+ * @adev: device
+ *
+ * Returns the base of the PCI configuration registers for this port
+ * number.
+ */
+
+static int sis_port_base(struct ata_device *adev)
+{
+ struct ata_port *ap = adev->link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = 0x40;
+ u32 reg54;
+
+ /* If bit 30 is set then the registers are mapped at 0x70 not 0x40 */
+ pci_read_config_dword(pdev, 0x54, &reg54);
+ if (reg54 & 0x40000000)
+ port = 0x70;
+
+ return port + (8 * ap->port_no) + (4 * adev->devno);
+}
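A worked example of the two base calculations, using illustrative values:

/*
 * Worked example (illustrative): ap->port_no = 1, adev->devno = 1.
 *
 *   sis_old_port_base(adev) = 0x40 + 4*1 + 2*1 = 0x46
 *   sis_port_base(adev)     = 0x40 + 8*1 + 4*1 = 0x4C
 *   ...or, when bit 30 of config reg 0x54 relocates the block:
 *                             0x70 + 8*1 + 4*1 = 0x7C
 */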
+
+/**
+ * sis_133_cable_detect - check for 40/80 pin
* @ap: Port
* @deadline: deadline jiffies for the operation
*
@@ -110,7 +133,7 @@ static int sis_133_cable_detect(struct ata_port *ap)
}
/**
- * sis_66_cable_detect - check for 40/80 pin
+ * sis_66_cable_detect - check for 40/80 pin
* @ap: Port
*
* Perform cable detection on the UDMA66, UDMA100 and early UDMA133
@@ -132,7 +155,7 @@ static int sis_66_cable_detect(struct ata_port *ap)
/**
- * sis_pre_reset - probe begin
+ * sis_pre_reset - probe begin
* @link: ATA link
* @deadline: deadline jiffies for the operation
*
@@ -160,7 +183,7 @@ static int sis_pre_reset(struct ata_link *link, unsigned long deadline)
/**
- * sis_set_fifo - Set RWP fifo bits for this device
+ * sis_set_fifo - Set RWP fifo bits for this device
* @ap: Port
* @adev: Device
*
@@ -203,13 +226,13 @@ static void sis_set_fifo(struct ata_port *ap, struct ata_device *adev)
static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_old_port_base(adev);
u8 t1, t2;
int speed = adev->pio_mode - XFER_PIO_0;
- const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
- const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
+ static const u8 active[] = { 0x00, 0x07, 0x04, 0x03, 0x01 };
+ static const u8 recovery[] = { 0x00, 0x06, 0x04, 0x03, 0x03 };
sis_set_fifo(ap, adev);
@@ -240,11 +263,11 @@ static void sis_old_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = sis_old_port_base(adev);
int speed = adev->pio_mode - XFER_PIO_0;
- const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
+ static const u8 actrec[] = { 0x00, 0x67, 0x44, 0x33, 0x31 };
sis_set_fifo(ap, adev);
@@ -265,20 +288,19 @@ static void sis_100_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int port = 0x40;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port;
u32 t1;
- u32 reg54;
int speed = adev->pio_mode - XFER_PIO_0;
- const u32 timing133[] = {
+ static const u32 timing133[] = {
0x28269000, /* Recovery << 24 | Act << 16 | Ini << 12 */
0x0C266000,
0x04263000,
0x0C0A3000,
0x05093000
};
- const u32 timing100[] = {
+ static const u32 timing100[] = {
0x1E1C6000, /* Recovery << 24 | Act << 16 | Ini << 12 */
0x091C4000,
0x031C2000,
@@ -288,12 +310,7 @@ static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
sis_set_fifo(ap, adev);
- /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */
- pci_read_config_dword(pdev, 0x54, &reg54);
- if (reg54 & 0x40000000)
- port = 0x70;
- port += 8 * ap->port_no + 4 * adev->devno;
-
+ port = sis_port_base(adev);
pci_read_config_dword(pdev, port, &t1);
t1 &= 0xC0C00FFF; /* Mask out timing */
@@ -319,13 +336,13 @@ static void sis_133_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u16 timing;
- const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
- const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
+ static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+ static const u16 udma_bits[] = { 0xE000, 0xC000, 0xA000 };
pci_read_config_word(pdev, drive_pci, &timing);
@@ -358,14 +375,14 @@ static void sis_old_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u16 timing;
/* MWDMA 0-2 and UDMA 0-5 */
- const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
- const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
+ static const u16 mwdma_bits[] = { 0x008, 0x302, 0x301 };
+ static const u16 udma_bits[] = { 0xF000, 0xD000, 0xB000, 0xA000, 0x9000, 0x8000 };
pci_read_config_word(pdev, drive_pci, &timing);
@@ -397,12 +414,12 @@ static void sis_66_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u8 timing;
- const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
+ static const u8 udma_bits[] = { 0x8B, 0x87, 0x85, 0x83, 0x82, 0x81};
pci_read_config_byte(pdev, drive_pci + 1, &timing);
@@ -431,7 +448,7 @@ static void sis_100_set_dmamode (struct ata_port *ap, struct ata_device *adev)
static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int speed = adev->dma_mode - XFER_MW_DMA_0;
int drive_pci = sis_old_port_base(adev);
u8 timing;
@@ -464,32 +481,34 @@ static void sis_133_early_set_dmamode (struct ata_port *ap, struct ata_device *a
static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- int speed = adev->dma_mode - XFER_MW_DMA_0;
- int port = 0x40;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port;
u32 t1;
- u32 reg54;
-
- /* bits 4- cycle time 8 - cvs time */
- static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
- static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
-
- /* If bit 14 is set then the registers are mapped at 0x70 not 0x40 */
- pci_read_config_dword(pdev, 0x54, &reg54);
- if (reg54 & 0x40000000)
- port = 0x70;
- port += (8 * ap->port_no) + (4 * adev->devno);
+ port = sis_port_base(adev);
pci_read_config_dword(pdev, port, &t1);
if (adev->dma_mode < XFER_UDMA_0) {
+ /* Recovery << 24 | Act << 16 | Ini << 12, like PIO modes */
+ static const u32 timing_u100[] = { 0x19154000, 0x06072000, 0x04062000 };
+ static const u32 timing_u133[] = { 0x221C6000, 0x0C0A3000, 0x05093000 };
+ int speed = adev->dma_mode - XFER_MW_DMA_0;
+
+ t1 &= 0xC0C00FFF;
+ /* disable UDMA */
t1 &= ~0x00000004;
- /* FIXME: need data sheet to add MWDMA here. Also lacking on
- ide/pci driver */
+ if (t1 & 0x08)
+ t1 |= timing_u133[speed];
+ else
+ t1 |= timing_u100[speed];
} else {
- speed = adev->dma_mode - XFER_UDMA_0;
- /* if & 8 no UDMA133 - need info for ... */
+ /* bits 4- cycle time 8 - cvs time */
+ static const u32 timing_u100[] = { 0x6B0, 0x470, 0x350, 0x140, 0x120, 0x110, 0x000 };
+ static const u32 timing_u133[] = { 0x9F0, 0x6A0, 0x470, 0x250, 0x230, 0x220, 0x210 };
+ int speed = adev->dma_mode - XFER_UDMA_0;
+
t1 &= ~0x00000FF0;
+ /* enable UDMA */
t1 |= 0x00000004;
if (t1 & 0x08)
t1 |= timing_u133[speed];
@@ -499,6 +518,27 @@ static void sis_133_set_dmamode (struct ata_port *ap, struct ata_device *adev)
pci_write_config_dword(pdev, port, t1);
}
+/**
+ * sis_133_mode_filter - mode selection filter
+ * @adev: ATA device
+ *
+ * Block UDMA6 on devices that do not support it.
+ */
+
+static unsigned long sis_133_mode_filter(struct ata_device *adev, unsigned long mask)
+{
+ struct ata_port *ap = adev->link->ap;
+ struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+ int port = sis_port_base(adev);
+ u32 t1;
+
+ pci_read_config_dword(pdev, port, &t1);
+ /* if ATA133 is disabled, mask it out */
+ if (!(t1 & 0x08))
+ mask &= ~(0xC0 << ATA_SHIFT_UDMA);
+ return mask;
+}
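The mask arithmetic above clears the top two UDMA bits: ATA_SHIFT_UDMA is the libata offset of the UDMA field within an xfer mask, so 0xC0 << ATA_SHIFT_UDMA covers modes 6 and 7. A minimal sketch of the effect:

/* Sketch: drop UDMA6/UDMA7 from an xfer mask (field layout per libata). */
static unsigned long drop_udma6_sketch(unsigned long mask)
{
	return mask & ~(0xC0UL << ATA_SHIFT_UDMA);
}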
+
static struct scsi_host_template sis_sht = {
ATA_BMDMA_SHT(DRV_NAME),
};
@@ -520,6 +560,7 @@ static struct ata_port_operations sis_133_ops = {
.set_piomode = sis_133_set_piomode,
.set_dmamode = sis_133_set_dmamode,
.cable_detect = sis_133_cable_detect,
+ .mode_filter = sis_133_mode_filter,
};
static struct ata_port_operations sis_133_early_ops = {
@@ -588,7 +629,7 @@ static const struct ata_port_info sis_info100_early = {
static const struct ata_port_info sis_info133 = {
.flags = ATA_FLAG_SLAVE_POSS,
.pio_mask = ATA_PIO4,
- /* No MWDMA */
+ .mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sis_133_ops,
};
@@ -669,7 +710,7 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
* @pdev: PCI device to register
* @ent: Entry in sis_pci_tbl matching with @pdev
*
- * Called from kernel PCI layer. We probe for combined mode (sigh),
+ * Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
@@ -681,7 +722,6 @@ static void sis_fixup(struct pci_dev *pdev, struct sis_chipset *sis)
static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct pci_dev *host = NULL;
struct sis_chipset *chipset = NULL;
@@ -735,9 +775,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
0x0, &sis_info100
};
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev,
- "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -772,17 +810,20 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
switch(trueid) {
case 0x5518: /* SIS 962/963 */
+ dev_info(&pdev->dev,
+ "SiS 962/963 MuTIOL IDE UDMA133 controller\n");
chipset = &sis133;
if ((idemisc & 0x40000000) == 0) {
pci_write_config_dword(pdev, 0x54, idemisc | 0x40000000);
- printk(KERN_INFO "SIS5513: Switching to 5513 register mapping\n");
+ dev_info(&pdev->dev,
+ "Switching to 5513 register mapping\n");
}
break;
case 0x0180: /* SIS 965/965L */
- chipset = &sis133;
+ chipset = &sis133;
break;
case 0x1180: /* SIS 966/966L */
- chipset = &sis133;
+ chipset = &sis133;
break;
}
}
diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
index 7f5d020..24cf200 100644
--- a/drivers/ata/pata_sl82c105.c
+++ b/drivers/ata/pata_sl82c105.c
@@ -1,6 +1,7 @@
/*
* pata_sl82c105.c - SL82C105 PATA for new ATA layer
* (C) 2005 Red Hat Inc
+ * (C) 2011 Bartlomiej Zolnierkiewicz
*
* Based in part on linux/drivers/ide/pci/sl82c105.c
* SL82C105/Winbond 553 IDE driver
@@ -289,6 +290,14 @@ static int sl82c105_bridge_revision(struct pci_dev *pdev)
return bridge->revision;
}
+static void sl82c105_fixup(struct pci_dev *pdev)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, 0x40, &val);
+ val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
+ pci_write_config_dword(pdev, 0x40, val);
+}
static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
@@ -306,7 +315,6 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
/* for now use only the first port */
const struct ata_port_info *ppi[] = { &info_early,
NULL };
- u32 val;
int rev;
int rc;
@@ -317,19 +325,36 @@ static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id
rev = sl82c105_bridge_revision(dev);
if (rev == -1)
- dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
+ dev_warn(&dev->dev,
+ "pata_sl82c105: Unable to find bridge, disabling DMA\n");
else if (rev <= 5)
- dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
+ dev_warn(&dev->dev,
+ "pata_sl82c105: Early bridge revision, no DMA available\n");
else
ppi[0] = &info_dma;
- pci_read_config_dword(dev, 0x40, &val);
- val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
- pci_write_config_dword(dev, 0x40, val);
+ sl82c105_fixup(dev);
return ata_pci_bmdma_init_one(dev, ppi, &sl82c105_sht, NULL, 0);
}
+#ifdef CONFIG_PM
+static int sl82c105_reinit_one(struct pci_dev *pdev)
+{
+ struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ int rc;
+
+ rc = ata_pci_device_do_resume(pdev);
+ if (rc)
+ return rc;
+
+ sl82c105_fixup(pdev);
+
+ ata_host_resume(host);
+ return 0;
+}
+#endif
+
static const struct pci_device_id sl82c105[] = {
{ PCI_VDEVICE(WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
@@ -340,7 +365,11 @@ static struct pci_driver sl82c105_pci_driver = {
.name = DRV_NAME,
.id_table = sl82c105,
.probe = sl82c105_init_one,
- .remove = ata_pci_remove_one
+ .remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = sl82c105_reinit_one,
+#endif
};
static int __init sl82c105_init(void)
diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
index b3e0c94..28da1c6 100644
--- a/drivers/ata/pata_triflex.c
+++ b/drivers/ata/pata_triflex.c
@@ -196,10 +196,8 @@ static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
.port_ops = &triflex_port_ops
};
const struct ata_port_info *ppi[] = { &info, NULL };
- static int printed_version;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&dev->dev, DRV_VERSION);
return ata_pci_bmdma_init_one(dev, ppi, &triflex_sht, NULL, 0);
}
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index d6d4f57..255f336 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -361,15 +361,15 @@ static unsigned long via_mode_filter(struct ata_device *dev, unsigned long mask)
if (config->id == PCI_DEVICE_ID_VIA_82C586_0) {
ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strcmp(model_num, "TS64GSSD25-M") == 0) {
- ata_dev_printk(dev, KERN_WARNING,
- "disabling UDMA mode due to reported lockups with this device.\n");
+ ata_dev_warn(dev,
+ "disabling UDMA mode due to reported lockups with this device\n");
mask &= ~ ATA_MASK_UDMA;
}
}
if (dev->class == ATA_DEV_ATAPI &&
dmi_check_system(no_atapi_dma_dmi_table)) {
- ata_dev_printk(dev, KERN_WARNING, "controller locks up on ATAPI DMA, forcing PIO\n");
+ ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}
@@ -509,6 +509,27 @@ static void via_config_fifo(struct pci_dev *pdev, unsigned int flags)
}
}
+static void via_fixup(struct pci_dev *pdev, const struct via_isa_bridge *config)
+{
+ u32 timing;
+
+ /* Initialise the FIFO for the enabled channels. */
+ via_config_fifo(pdev, config->flags);
+
+ if (config->udma_mask == ATA_UDMA4) {
+ /* The 66 MHz devices require we enable the clock */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing |= 0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+ if (config->flags & VIA_BAD_CLK66) {
+ /* Disable the 66MHz clock on problem devices */
+ pci_read_config_dword(pdev, 0x50, &timing);
+ timing &= ~0x80008;
+ pci_write_config_dword(pdev, 0x50, timing);
+ }
+}
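Both branches of via_fixup() read-modify-write the same dword at config offset 0x50, toggling the 0x80008 clock bits; as a sketch (bit meanings taken from the comments above):

/* Sketch: set or clear the 66MHz clock bits (0x80008) in config reg 0x50. */
static void via_set_clk66_sketch(struct pci_dev *pdev, bool enable)
{
	u32 timing;

	pci_read_config_dword(pdev, 0x50, &timing);
	if (enable)
		timing |= 0x80008;	/* UDMA66 parts need the clock on */
	else
		timing &= ~0x80008;	/* VIA_BAD_CLK66 parts need it off */
	pci_write_config_dword(pdev, 0x50, timing);
}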
+
/**
* via_init_one - discovery callback
* @pdev: PCI device
@@ -569,14 +590,11 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { NULL, NULL };
struct pci_dev *isa;
const struct via_isa_bridge *config;
- static int printed_version;
u8 enable;
- u32 timing;
unsigned long flags = id->driver_data;
int rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -611,9 +629,6 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- /* Initialise the FIFO for the enabled channels. */
- via_config_fifo(pdev, config->flags);
-
/* Clock set up */
switch (config->udma_mask) {
case 0x00:
@@ -639,12 +654,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- if (config->flags & VIA_BAD_CLK66) {
- /* Disable the 66MHz clock on problem devices */
- pci_read_config_dword(pdev, 0x50, &timing);
- timing &= ~0x80008;
- pci_write_config_dword(pdev, 0x50, timing);
- }
+ via_fixup(pdev, config);
/* We have established the device type, now fire it up */
return ata_pci_bmdma_init_one(pdev, ppi, &via_sht, (void *)config, 0);
@@ -663,29 +673,14 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
static int via_reinit_one(struct pci_dev *pdev)
{
- u32 timing;
struct ata_host *host = dev_get_drvdata(&pdev->dev);
- const struct via_isa_bridge *config = host->private_data;
int rc;
rc = ata_pci_device_do_resume(pdev);
if (rc)
return rc;
- via_config_fifo(pdev, config->flags);
-
- if (config->udma_mask == ATA_UDMA4) {
- /* The 66 MHz devices require we enable the clock */
- pci_read_config_dword(pdev, 0x50, &timing);
- timing |= 0x80008;
- pci_write_config_dword(pdev, 0x50, timing);
- }
- if (config->flags & VIA_BAD_CLK66) {
- /* Disable the 66MHz clock on problem devices */
- pci_read_config_dword(pdev, 0x50, &timing);
- timing &= ~0x80008;
- pci_write_config_dword(pdev, 0x50, timing);
- }
+ via_fixup(pdev, host->private_data);
ata_host_resume(host);
return 0;
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 1111712..04911d5 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -596,14 +596,12 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
return rc;
}
return 0;
@@ -612,15 +610,13 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &adma_port_info[board_idx], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int rc, port_no;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ADMA_PORTS);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index dc88a39..0bec79e 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -766,11 +766,15 @@ static int dma_dwc_xfer_setup(struct scatterlist *sg, int num_elems,
static void dma_dwc_exit(struct sata_dwc_device *hsdev)
{
dev_dbg(host_pvt.dwc_dev, "%s:\n", __func__);
- if (host_pvt.sata_dma_regs)
+ if (host_pvt.sata_dma_regs) {
iounmap(host_pvt.sata_dma_regs);
+ host_pvt.sata_dma_regs = NULL;
+ }
- if (hsdev->irq_dma)
+ if (hsdev->irq_dma) {
free_irq(hsdev->irq_dma, hsdev);
+ hsdev->irq_dma = 0;
+ }
}
/*
@@ -787,7 +791,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
if (err) {
dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
" %d\n", __func__, err);
- goto error_out;
+ return err;
}
	/* Enable DMA */
@@ -798,11 +802,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
sata_dma_regs);
return 0;
-
-error_out:
- dma_dwc_exit(hsdev);
-
- return err;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -1325,7 +1324,7 @@ static int sata_dwc_port_start(struct ata_port *ap)
dev_err(ap->dev, "%s: dma_alloc_coherent failed\n",
__func__);
err = -ENOMEM;
- goto CLEANUP;
+ goto CLEANUP_ALLOC;
}
}
@@ -1345,15 +1344,13 @@ static int sata_dwc_port_start(struct ata_port *ap)
/* Clear any error bits before libata starts issuing commands */
clear_serror();
ap->private_data = hsdevp;
+ dev_dbg(ap->dev, "%s: done\n", __func__);
+ return 0;
+CLEANUP_ALLOC:
+ kfree(hsdevp);
CLEANUP:
- if (err) {
- sata_dwc_port_stop(ap);
- dev_dbg(ap->dev, "%s: fail\n", __func__);
- } else {
- dev_dbg(ap->dev, "%s: done\n", __func__);
- }
-
+ dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
return err;
}
@@ -1632,19 +1629,18 @@ static int sata_dwc_probe(struct platform_device *ofdev)
char *ver = (char *)&versionr;
u8 *base = NULL;
int err = 0;
- int irq, rc;
+ int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
/* Allocate DWC SATA device */
- hsdev = kmalloc(sizeof(*hsdev), GFP_KERNEL);
+ hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
if (hsdev == NULL) {
dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
err = -ENOMEM;
- goto error_out;
+ goto error;
}
- memset(hsdev, 0, sizeof(*hsdev));
/* Ioremap SATA registers */
base = of_iomap(ofdev->dev.of_node, 0);
@@ -1652,7 +1648,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "ioremap failed for SATA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_kmalloc;
}
hsdev->reg_base = base;
dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
@@ -1665,7 +1661,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (!host) {
dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
err = -ENOMEM;
- goto error_out;
+ goto error_iomap;
}
host->private_data = hsdev;
@@ -1687,7 +1683,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (irq == NO_IRQ) {
dev_err(&ofdev->dev, "no SATA DMA irq\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Get physical SATA DMA register base address */
@@ -1696,14 +1692,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Save dev for later use in dev_xxx() routines */
host_pvt.dwc_dev = &ofdev->dev;
/* Initialize AHB DMAC */
- dma_dwc_init(hsdev, irq);
+ err = dma_dwc_init(hsdev, irq);
+ if (err)
+ goto error_dma_iomap;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1721,9 +1719,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
* device discovery process, invoking our port_start() handler &
* error_handler() to execute a dummy Softreset EH session
*/
- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
- if (rc != 0)
+ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+ if (err)
dev_err(&ofdev->dev, "failed to activate host");
dev_set_drvdata(&ofdev->dev, host);
@@ -1732,9 +1729,13 @@ static int sata_dwc_probe(struct platform_device *ofdev)
error_out:
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
-
- if (base)
- iounmap(base);
+error_dma_iomap:
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
+error_iomap:
+ iounmap(base);
+error_kmalloc:
+ kfree(hsdev);
+error:
return err;
}
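The reworked error path releases resources in strict reverse order of acquisition, one label per step; the general shape of the ladder, with hypothetical step/undo names:

/* Sketch of the probe unwind ladder; every name here is hypothetical. */
static int step_a(void) { return 0; }	/* e.g. allocate */
static int step_b(void) { return 0; }	/* e.g. ioremap */
static int step_c(void) { return 0; }	/* e.g. init DMA */
static void undo_b(void) { }
static void undo_a(void) { }

static int probe_sketch(void)
{
	int err;

	err = step_a();
	if (err)
		goto error;
	err = step_b();
	if (err)
		goto error_a;
	err = step_c();
	if (err)
		goto error_b;
	return 0;

error_b:
	undo_b();	/* undo b before a: reverse of acquisition */
error_a:
	undo_a();
error:
	return err;
}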
@@ -1750,6 +1751,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
iounmap(hsdev->reg_base);
kfree(hsdev);
kfree(host);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 35a71d8..54702f8 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -346,12 +346,11 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
/* warn if each s/g element is not dword aligned */
if (sg_addr & 0x03)
- ata_port_printk(qc->ap, KERN_ERR,
- "s/g addr unaligned : 0x%llx\n",
- (unsigned long long)sg_addr);
+ ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n",
+ (unsigned long long)sg_addr);
if (sg_len & 0x03)
- ata_port_printk(qc->ap, KERN_ERR,
- "s/g len unaligned : 0x%x\n", sg_len);
+ ata_port_err(qc->ap, "s/g len unaligned : 0x%x\n",
+ sg_len);
if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
sg_next(sg) != NULL) {
@@ -661,8 +660,7 @@ static int sata_fsl_port_start(struct ata_port *ap)
sata_fsl_scr_write(&ap->link, SCR_CONTROL, temp);
sata_fsl_scr_read(&ap->link, SCR_CONTROL, &temp);
- dev_printk(KERN_WARNING, dev, "scr_control, speed limited to %x\n",
- temp);
+ dev_warn(dev, "scr_control, speed limited to %x\n", temp);
#endif
return 0;
@@ -740,8 +738,7 @@ try_offline_again:
1, 500);
if (temp & ONLINE) {
- ata_port_printk(ap, KERN_ERR,
- "Hardreset failed, not off-lined %d\n", i);
+ ata_port_err(ap, "Hardreset failed, not off-lined %d\n", i);
/*
 * Try to offline controller at least twice
@@ -777,8 +774,7 @@ try_offline_again:
temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500);
if (!(temp & ONLINE)) {
- ata_port_printk(ap, KERN_ERR,
- "Hardreset failed, not on-lined\n");
+ ata_port_err(ap, "Hardreset failed, not on-lined\n");
goto err;
}
@@ -794,9 +790,8 @@ try_offline_again:
temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500);
if ((!(temp & 0x10)) || ata_link_offline(link)) {
- ata_port_printk(ap, KERN_WARNING,
- "No Device OR PHYRDY change,Hstatus = 0x%x\n",
- ioread32(hcr_base + HSTATUS));
+ ata_port_warn(ap, "No Device OR PHYRDY change,Hstatus = 0x%x\n",
+ ioread32(hcr_base + HSTATUS));
*class = ATA_DEV_NONE;
return 0;
}
@@ -809,13 +804,12 @@ try_offline_again:
500, jiffies_to_msecs(deadline - start_jiffies));
if ((temp & 0xFF) != 0x18) {
- ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
+ ata_port_warn(ap, "No Signature Update\n");
*class = ATA_DEV_NONE;
goto do_followup_srst;
} else {
- ata_port_printk(ap, KERN_INFO,
- "Signature Update detected @ %d msecs\n",
- jiffies_to_msecs(jiffies - start_jiffies));
+ ata_port_info(ap, "Signature Update detected @ %d msecs\n",
+ jiffies_to_msecs(jiffies - start_jiffies));
*class = sata_fsl_dev_classify(ap);
return 0;
}
@@ -890,7 +884,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000);
if (temp & 0x1) {
- ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n");
+ ata_port_warn(ap, "ATA_SRST issue failed\n");
DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
@@ -1202,8 +1196,7 @@ static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
if (ap) {
sata_fsl_host_intr(ap);
} else {
- dev_printk(KERN_WARNING, host->dev,
- "interrupt on disabled port 0\n");
+ dev_warn(host->dev, "interrupt on disabled port 0\n");
}
iowrite32(interrupt_enables, hcr_base + HSTATUS);
@@ -1317,8 +1310,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
struct ata_port_info pi = sata_fsl_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
- dev_printk(KERN_INFO, &ofdev->dev,
- "Sata FSL Platform/CSB Driver init\n");
+ dev_info(&ofdev->dev, "Sata FSL Platform/CSB Driver init\n");
hcr_base = of_iomap(ofdev->dev.of_node, 0);
if (!hcr_base)
@@ -1346,8 +1338,8 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->csr_base = csr_base;
irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (irq < 0) {
- dev_printk(KERN_ERR, &ofdev->dev, "invalid irq from platform\n");
+ if (!irq) {
+ dev_err(&ofdev->dev, "invalid irq from platform\n");
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
@@ -1422,8 +1414,7 @@ static int sata_fsl_resume(struct platform_device *op)
ret = sata_fsl_init_controller(host);
if (ret) {
- dev_printk(KERN_ERR, &op->dev,
- "Error initialize hardware\n");
+ dev_err(&op->dev, "Error initializing hardware\n");
return ret;
}
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 15391ee..3a8b55e 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -408,9 +408,8 @@ static void inic_host_intr(struct ata_port *ap)
}
spurious:
- ata_port_printk(ap, KERN_WARNING, "unhandled interrupt: "
- "cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
- qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
+ ata_port_warn(ap, "unhandled interrupt: cmd=0x%x irq_stat=0x%x idma_stat=0x%x\n",
+ qc ? qc->tf.command : 0xff, irq_stat, idma_stat);
}
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
@@ -631,8 +630,9 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
rc = sata_link_resume(link, timing, deadline);
if (rc) {
- ata_link_printk(link, KERN_WARNING, "failed to resume "
- "link after reset (errno=%d)\n", rc);
+ ata_link_warn(link,
+ "failed to resume link after reset (errno=%d)\n",
+ rc);
return rc;
}
@@ -644,8 +644,9 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
rc = ata_wait_after_reset(link, deadline, inic_check_ready);
/* link occupied, -ENODEV too is an error */
if (rc) {
- ata_link_printk(link, KERN_WARNING, "device not ready "
- "after hardreset (errno=%d)\n", rc);
+ ata_link_warn(link,
+ "device not ready after hardreset (errno=%d)\n",
+ rc);
return rc;
}
@@ -811,7 +812,6 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { &inic_port_info, NULL };
struct ata_host *host;
struct inic_host_priv *hpriv;
@@ -819,8 +819,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int mmio_bar;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
@@ -861,15 +860,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set dma_mask. This devices doesn't support 64bit addressing. */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n");
return rc;
}
@@ -880,15 +877,13 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to set the maximum segment size.\n");
+ dev_err(&pdev->dev, "failed to set the maximum segment size\n");
return rc;
}
rc = init_controller(hpriv->mmio_base, hpriv->cached_hctl);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "failed to initialize controller\n");
+ dev_err(&pdev->dev, "failed to initialize controller\n");
return rc;
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b52c051..0b8b8b4 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1190,7 +1190,7 @@ static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
break;
udelay(per_loop);
}
- /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
+ /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}
/**
@@ -1228,7 +1228,7 @@ static int mv_stop_edma(struct ata_port *ap)
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
mv_wait_for_edma_empty_idle(ap);
if (mv_stop_edma_engine(port_mmio)) {
- ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+ ata_port_err(ap, "Unable to stop eDMA\n");
err = -EIO;
}
mv_edma_cfg(ap, 0, 0);
@@ -1382,7 +1382,7 @@ static void mv6_dev_config(struct ata_device *adev)
if (adev->flags & ATA_DFLAG_NCQ) {
if (sata_pmp_attached(adev->link->ap)) {
adev->flags &= ~ATA_DFLAG_NCQ;
- ata_dev_printk(adev, KERN_INFO,
+ ata_dev_info(adev,
"NCQ disabled for command-based switching\n");
}
}
@@ -2225,9 +2225,8 @@ static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
/* See if it worked */
if ((ifstat & 0x3000) != 0x1000) {
- ata_port_printk(ap, KERN_WARNING,
- "%s transmission error, ifstat=%08x\n",
- __func__, ifstat);
+ ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
+ __func__, ifstat);
return AC_ERR_OTHER;
}
return 0;
@@ -2342,9 +2341,9 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
*/
if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
--limit_warnings;
- ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
- ": attempting PIO w/multiple DRQ: "
- "this may fail due to h/w errata\n");
+ ata_link_warn(qc->dev->link, DRV_NAME
+ ": attempting PIO w/multiple DRQ: "
+ "this may fail due to h/w errata\n");
}
/* drop through */
case ATA_PROT_NODATA:
@@ -2499,20 +2498,20 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
}
failed_links = hweight16(new_map);
- ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
- "failed_links=%d nr_active_links=%d\n",
- __func__, pp->delayed_eh_pmp_map,
- ap->qc_active, failed_links,
- ap->nr_active_links);
+ ata_port_info(ap,
+ "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
+ __func__, pp->delayed_eh_pmp_map,
+ ap->qc_active, failed_links,
+ ap->nr_active_links);
if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
mv_process_crpb_entries(ap, pp);
mv_stop_edma(ap);
mv_eh_freeze(ap);
- ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
+ ata_port_info(ap, "%s: done\n", __func__);
return 1; /* handled */
}
- ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
+ ata_port_info(ap, "%s: waiting\n", __func__);
return 1; /* handled */
}
@@ -2554,9 +2553,8 @@ static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
* and we cannot handle it here.
*/
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
- ata_port_printk(ap, KERN_WARNING,
- "%s: err_cause=0x%x pp_flags=0x%x\n",
- __func__, edma_err_cause, pp->pp_flags);
+ ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_ncq_dev_err(ap);
@@ -2567,9 +2565,8 @@ static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
* and we cannot handle it here.
*/
if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
- ata_port_printk(ap, KERN_WARNING,
- "%s: err_cause=0x%x pp_flags=0x%x\n",
- __func__, edma_err_cause, pp->pp_flags);
+ ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
+ __func__, edma_err_cause, pp->pp_flags);
return 0; /* not handled */
}
return mv_handle_fbs_non_ncq_dev_err(ap);
@@ -2930,8 +2927,7 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
err_cause = readl(mmio + hpriv->irq_cause_offset);
- dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
- err_cause);
+ dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
DPRINTK("All regs @ PCI error\n");
mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
@@ -3760,8 +3756,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying 50XXB2 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying 50XXB2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
@@ -3780,8 +3776,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying B2 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_50XXB2;
break;
}
@@ -3801,8 +3797,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying B2 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying B2 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1B2;
break;
}
@@ -3851,8 +3847,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
default:
- dev_printk(KERN_WARNING, &pdev->dev,
- "Applying 60X1C0 workarounds to unknown rev\n");
+ dev_warn(&pdev->dev,
+ "Applying 60X1C0 workarounds to unknown rev\n");
hp_flags |= MV_HP_ERRATA_60X1C0;
break;
}
@@ -3867,8 +3863,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
- dev_printk(KERN_ERR, host->dev,
- "BUG: invalid board index %u\n", board_idx);
+ dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
return 1;
}
@@ -4023,7 +4018,6 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
*/
static int mv_platform_probe(struct platform_device *pdev)
{
- static int printed_version;
const struct mv_sata_platform_data *mv_platform_data;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
@@ -4032,8 +4026,7 @@ static int mv_platform_probe(struct platform_device *pdev)
struct resource *res;
int n_ports, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/*
* Simple resource validation ..
@@ -4091,12 +4084,14 @@ static int mv_platform_probe(struct platform_device *pdev)
if (rc)
goto err;
- dev_printk(KERN_INFO, &pdev->dev,
- "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
- host->n_ports);
+ dev_info(&pdev->dev, "slots %u ports %d\n",
+ (unsigned)MV_MAX_Q_DEPTH, host->n_ports);
+
+ rc = ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
+ IRQF_SHARED, &mv6_sht);
+ if (!rc)
+ return 0;
- return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
- IRQF_SHARED, &mv6_sht);
err:
#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
@@ -4118,8 +4113,7 @@ err:
*/
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct ata_host *host = dev_get_drvdata(dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
#endif
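The drvdata conversions here are behavior-neutral: platform_get_drvdata() is a thin typed wrapper over dev_get_drvdata() on &pdev->dev, roughly:

/* Sketch of the equivalence; the in-tree definition is essentially this. */
static inline void *platform_get_drvdata_sketch(const struct platform_device *pdev)
{
	return dev_get_drvdata(&pdev->dev);
}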
@@ -4137,7 +4131,7 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
if (host)
return ata_host_suspend(host, state);
else
@@ -4146,7 +4140,7 @@ static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
static int mv_platform_resume(struct platform_device *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = platform_get_drvdata(pdev);
int ret;
if (host) {
@@ -4217,22 +4211,21 @@ static int pci_go_64(struct pci_dev *pdev)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -4276,10 +4269,9 @@ static void mv_print_info(struct ata_host *host)
else
gen = "?";
- dev_printk(KERN_INFO, &pdev->dev,
- "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
- gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
- scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
+ dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
+ gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
+ scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
@@ -4293,15 +4285,13 @@ static void mv_print_info(struct ata_host *host)
static int mv_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int)ent->driver_data;
const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
struct ata_host *host;
struct mv_host_priv *hpriv;
int n_ports, port, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
@@ -4365,7 +4355,7 @@ static int mv_pci_init_one(struct pci_dev *pdev,
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
+ struct ata_host *host = pci_get_drvdata(pdev);
int rc;
rc = ata_pci_device_do_resume(pdev);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index f173ef3..e0bc964 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -620,9 +620,8 @@ static void nv_adma_register_mode(struct ata_port *ap)
count++;
}
if (count == 20)
- ata_port_printk(ap, KERN_WARNING,
- "timeout waiting for ADMA IDLE, stat=0x%hx\n",
- status);
+ ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
+ status);
tmp = readw(mmio + NV_ADMA_CTL);
writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
@@ -635,9 +634,9 @@ static void nv_adma_register_mode(struct ata_port *ap)
count++;
}
if (count == 20)
- ata_port_printk(ap, KERN_WARNING,
- "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
- status);
+ ata_port_warn(ap,
+ "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
+ status);
pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
@@ -665,7 +664,7 @@ static void nv_adma_mode(struct ata_port *ap)
count++;
}
if (count == 20)
- ata_port_printk(ap, KERN_WARNING,
+ ata_port_warn(ap,
"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
status);
@@ -772,10 +771,10 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
blk_queue_max_segments(sdev->request_queue, sg_tablesize);
- ata_port_printk(ap, KERN_INFO,
- "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
- (unsigned long long)*ap->host->dev->dma_mask,
- segment_boundary, sg_tablesize);
+ ata_port_info(ap,
+ "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
+ (unsigned long long)*ap->host->dev->dma_mask,
+ segment_boundary, sg_tablesize);
spin_unlock_irqrestore(ap->lock, flags);
@@ -1443,8 +1442,7 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
existing commands. */
if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
(qc->flags & ATA_QCFLAG_RESULT_TF))) {
- ata_dev_printk(qc->dev, KERN_ERR,
- "NCQ w/ RESULT_TF not allowed\n");
+ ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
return AC_ERR_SYSTEM;
}
@@ -1581,15 +1579,15 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class,
int rc;
if (!(ehc->i.flags & ATA_EHI_QUIET))
- ata_link_printk(link, KERN_INFO, "nv: skipping "
- "hardreset on occupied port\n");
+ ata_link_info(link,
+ "nv: skipping hardreset on occupied port\n");
/* make sure the link is online */
rc = sata_link_resume(link, timing, deadline);
/* whine about phy resume failure but proceed */
if (rc && rc != -EOPNOTSUPP)
- ata_link_printk(link, KERN_WARNING, "failed to resume "
- "link (errno=%d)\n", rc);
+ ata_link_warn(link, "failed to resume link (errno=%d)\n",
+ rc);
}
/* device signature acquisition is unreliable */
@@ -1686,7 +1684,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
- ata_port_printk(ap, KERN_ERR,
+ ata_port_err(ap,
"EH in ADMA mode, notifier 0x%X "
"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
"next cpb count 0x%X next cpb idx 0x%x\n",
@@ -1697,7 +1695,7 @@ static void nv_adma_error_handler(struct ata_port *ap)
struct nv_adma_cpb *cpb = &pp->cpb[i];
if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
ap->link.sactive & (1 << i))
- ata_port_printk(ap, KERN_ERR,
+ ata_port_err(ap,
"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
i, cpb->ctl_flags, cpb->resp_flags);
}
@@ -1799,23 +1797,22 @@ static void nv_swncq_ncq_stop(struct ata_port *ap)
u32 sactive;
u32 done_mask;
- ata_port_printk(ap, KERN_ERR,
- "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
- ap->qc_active, ap->link.sactive);
- ata_port_printk(ap, KERN_ERR,
+ ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
+ ap->qc_active, ap->link.sactive);
+ ata_port_err(ap,
"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
- ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
- ap->ops->sff_check_status(ap),
- ioread8(ap->ioaddr.error_addr));
+ ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
+ ap->ops->sff_check_status(ap),
+ ioread8(ap->ioaddr.error_addr));
sactive = readl(pp->sactive_block);
done_mask = pp->qc_active ^ sactive;
- ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
+ ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
for (i = 0; i < ATA_MAX_QUEUE; i++) {
u8 err = 0;
if (pp->qc_active & (1 << i))
@@ -1825,13 +1822,13 @@ static void nv_swncq_ncq_stop(struct ata_port *ap)
else
continue;
- ata_port_printk(ap, KERN_ERR,
- "tag 0x%x: %01x %01x %01x %01x %s\n", i,
- (pp->dhfis_bits >> i) & 0x1,
- (pp->dmafis_bits >> i) & 0x1,
- (pp->sdbfis_bits >> i) & 0x1,
- (sactive >> i) & 0x1,
- (err ? "error! tag doesn't exit" : " "));
+ ata_port_err(ap,
+ "tag 0x%x: %01x %01x %01x %01x %s\n", i,
+ (pp->dhfis_bits >> i) & 0x1,
+ (pp->dmafis_bits >> i) & 0x1,
+ (pp->sdbfis_bits >> i) & 0x1,
+ (sactive >> i) & 0x1,
+ (err ? "error! tag doesn't exist" : " "));
}
nv_swncq_pp_reinit(ap);
@@ -1956,8 +1953,8 @@ static int nv_swncq_slave_config(struct scsi_device *sdev)
if (strncmp(model_num, "Maxtor", 6) == 0) {
ata_scsi_change_queue_depth(sdev, 1, SCSI_QDEPTH_DEFAULT);
- ata_dev_printk(dev, KERN_NOTICE,
- "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
+ ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
+ sdev->queue_depth);
}
return rc;
@@ -2356,7 +2353,6 @@ static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { NULL, NULL };
struct nv_pi_priv *ipriv;
struct ata_host *host;
@@ -2373,8 +2369,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_resource_start(pdev, bar) == 0)
return -ENODEV;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -2382,10 +2377,10 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* determine type and allocate host */
if (type == CK804 && adma_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
+ dev_notice(&pdev->dev, "Using ADMA mode\n");
type = ADMA;
} else if (type == MCP5x && swncq_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using SWNCQ mode\n");
+ dev_notice(&pdev->dev, "Using SWNCQ mode\n");
type = SWNCQ;
}
@@ -2429,7 +2424,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
nv_swncq_host_init(host);
if (msi_enabled) {
- dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
+ dev_notice(&pdev->dev, "Using MSI\n");
pci_enable_msi(pdev);
}
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index ca4646a..ef6e328 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -1184,7 +1184,6 @@ static void pdc_host_init(struct ata_host *host)
static int pdc_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *pi = &pdc_port_info[ent->driver_data];
const struct ata_port_info *ppi[PDC_MAX_PORTS];
struct ata_host *host;
@@ -1193,8 +1192,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
int n_ports, i, rc;
int is_sataii_tx4;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* enable and acquire resources */
rc = pcim_enable_device(pdev);
@@ -1223,7 +1221,7 @@ static int pdc_ata_init_one(struct pci_dev *pdev,
host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+ dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
hpriv = devm_kzalloc(&pdev->dev, sizeof *hpriv, GFP_KERNEL);
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index c560326..9d1a47b 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -563,21 +563,20 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_err(&pdev->dev,
"32-bit consistent DMA enable failed\n");
return rc;
}
@@ -588,14 +587,12 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
static int qs_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
struct ata_host *host;
int rc, port_no;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index b42edaa..0c4ed89 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -157,6 +157,7 @@ static const struct sil_drivelist {
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
@@ -438,7 +439,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
u8 status;
if (unlikely(bmdma2 & SIL_DMA_SATA_IRQ)) {
- u32 serror;
+ u32 serror = 0xffffffff;
/* SIEN doesn't mask SATA IRQs on some 3112s. Those
* controllers continue to assert IRQ as long as
@@ -643,8 +644,8 @@ static void sil_dev_config(struct ata_device *dev)
((ap->flags & SIL_FLAG_MOD15WRITE) &&
(quirks & SIL_QUIRK_MOD15WRITE))) {
if (print_info)
- ata_dev_printk(dev, KERN_INFO, "applying Seagate "
- "errata fix (mod15write workaround)\n");
+ ata_dev_info(dev,
+ "applying Seagate errata fix (mod15write workaround)\n");
dev->max_sectors = 15;
return;
}
@@ -652,8 +653,8 @@ static void sil_dev_config(struct ata_device *dev)
/* limit to udma5 */
if (quirks & SIL_QUIRK_UDMA5MAX) {
if (print_info)
- ata_dev_printk(dev, KERN_INFO, "applying Maxtor "
- "errata fix %s\n", model_num);
+ ata_dev_info(dev, "applying Maxtor errata fix %s\n",
+ model_num);
dev->udma_mask &= ATA_UDMA5;
return;
}
@@ -676,8 +677,8 @@ static void sil_init_controller(struct ata_host *host)
writew(cls << 8 | cls,
mmio_base + sil_port[i].fifo_cfg);
} else
- dev_printk(KERN_WARNING, &pdev->dev,
- "cache line size not set. Driver may not function\n");
+ dev_warn(&pdev->dev,
+ "cache line size not set. Driver may not function\n");
/* Apply R_ERR on DMA activate FIS errata workaround */
if (host->ports[0]->flags & SIL_FLAG_RERR_ON_DMA_ACT) {
@@ -688,9 +689,8 @@ static void sil_init_controller(struct ata_host *host)
if ((tmp & 0x3) != 0x01)
continue;
if (!cnt)
- dev_printk(KERN_INFO, &pdev->dev,
- "Applying R_ERR on DMA activate "
- "FIS errata fix\n");
+ dev_info(&pdev->dev,
+ "Applying R_ERR on DMA activate FIS errata fix\n");
writel(tmp & ~0x3, mmio_base + sil_port[i].sfis_cfg);
cnt++;
}
@@ -733,7 +733,6 @@ static bool sil_broken_system_poweroff(struct pci_dev *pdev)
static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
int board_id = ent->driver_data;
struct ata_port_info pi = sil_port_info[board_id];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -742,8 +741,7 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int n_ports, rc;
unsigned int i;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = 2;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 06c564e..2178022 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -246,7 +246,7 @@ enum {
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
- ATA_FLAG_AN | ATA_FLAG_PMP,
+ ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
@@ -268,7 +268,7 @@ union sil24_cmd_block {
struct sil24_atapi_block atapi;
};
-static struct sil24_cerr_info {
+static const struct sil24_cerr_info {
unsigned int err_mask, action;
const char *desc;
} sil24_cerr_db[] = {
@@ -694,7 +694,7 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
return 0;
err:
- ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
+ ata_link_err(link, "softreset failed (%s)\n", reason);
return -EIO;
}
@@ -714,8 +714,8 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
* This happens often after PM DMA CS errata.
*/
if (pp->do_port_rst) {
- ata_port_printk(ap, KERN_WARNING, "controller in dubious "
- "state, performing PORT_RST\n");
+ ata_port_warn(ap,
+ "controller in dubious state, performing PORT_RST\n");
writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
ata_msleep(ap, 10);
@@ -773,7 +773,7 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class,
goto retry;
}
- ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason);
+ ata_link_err(link, "hardreset failed (%s)\n", reason);
return -EIO;
}
@@ -925,7 +925,7 @@ static void sil24_pmp_attach(struct ata_port *ap)
if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
sata_pmp_gscr_devid(gscr) == 0x4140) {
- ata_port_printk(ap, KERN_INFO,
+ ata_port_info(ap,
"disabling NCQ support due to sil24-mv4140 quirk\n");
ap->flags &= ~ATA_FLAG_NCQ;
}
@@ -946,8 +946,7 @@ static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
rc = sil24_init_port(link->ap);
if (rc) {
- ata_link_printk(link, KERN_ERR,
- "hardreset failed (port not ready)\n");
+ ata_link_err(link, "hardreset failed (port not ready)\n");
return rc;
}
@@ -1020,7 +1019,7 @@ static void sil24_error_intr(struct ata_port *ap)
/* deal with command error */
if (irq_stat & PORT_IRQ_ERROR) {
- struct sil24_cerr_info *ci = NULL;
+ const struct sil24_cerr_info *ci = NULL;
unsigned int err_mask = 0, action = 0;
u32 context, cerr;
int pmp;
@@ -1141,8 +1140,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
/* spurious interrupts are expected if PCIX_IRQ_WOC */
if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
- ata_port_printk(ap, KERN_INFO, "spurious interrupt "
- "(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
+ ata_port_info(ap,
+ "spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
slot_stat, ap->link.active_tag, ap->link.sactive);
}
@@ -1256,8 +1255,8 @@ static void sil24_init_controller(struct ata_host *host)
PORT_CS_PORT_RST,
PORT_CS_PORT_RST, 10, 100);
if (tmp & PORT_CS_PORT_RST)
- dev_printk(KERN_ERR, host->dev,
- "failed to clear port RST\n");
+ dev_err(host->dev,
+ "failed to clear port RST\n");
}
/* configure port */
@@ -1271,7 +1270,6 @@ static void sil24_init_controller(struct ata_host *host)
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
- static int printed_version;
struct ata_port_info pi = sil24_port_info[ent->driver_data];
const struct ata_port_info *ppi[] = { &pi, NULL };
void __iomem * const *iomap;
@@ -1283,8 +1281,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
__MARKER__sil24_cmd_block_is_sized_wrongly = 1;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* acquire resources */
rc = pcim_enable_device(pdev);
@@ -1302,9 +1299,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
- dev_printk(KERN_INFO, &pdev->dev,
- "Applying completion IRQ loss on PCI-X "
- "errata fix\n");
+ dev_info(&pdev->dev,
+ "Applying completion IRQ loss on PCI-X errata fix\n");
else
pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
@@ -1322,22 +1318,21 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "64-bit DMA enable failed\n");
return rc;
}
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
+ dev_err(&pdev->dev, "32-bit DMA enable failed\n");
return rc;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
+ dev_err(&pdev->dev,
+ "32-bit consistent DMA enable failed\n");
return rc;
}
}
@@ -1350,7 +1345,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sil24_init_controller(host);
if (sata_sil24_msi && !pci_enable_msi(pdev)) {
- dev_printk(KERN_INFO, &pdev->dev, "Using MSI\n");
+ dev_info(&pdev->dev, "Using MSI\n");
pci_intx(pdev, 0);
}
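The remaining sil24 changes are mechanical message-API conversions plus a const-ification: dev_printk(KERN_ERR, dev, ...) becomes dev_err(dev, ...) and so on, and sil24_cerr_db moves to read-only data since it is never written. The dev_<level> helpers simply bundle a fixed log level with the device-prefixed printk; conceptually (simplified sketch with hypothetical my_dev_* names, the real definitions are in include/linux/device.h):

#include <linux/device.h>

/* Each helper pairs dev_printk() with one log level so call
 * sites stay short. */
#define my_dev_err(dev, fmt, ...) \
	dev_printk(KERN_ERR, dev, fmt, ##__VA_ARGS__)
#define my_dev_warn(dev, fmt, ...) \
	dev_printk(KERN_WARNING, dev, fmt, ##__VA_ARGS__)
#define my_dev_info(dev, fmt, ...) \
	dev_printk(KERN_INFO, dev, fmt, ##__VA_ARGS__)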
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index cdcc13e..95ec435 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -104,7 +104,7 @@ static const struct ata_port_info sis_port_info = {
};
MODULE_AUTHOR("Uwe Koziolek");
-MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller");
+MODULE_DESCRIPTION("low-level driver for Silicon Integrated Systems SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sis_pci_tbl);
MODULE_VERSION(DRV_VERSION);
@@ -193,7 +193,6 @@ static int sis_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
struct ata_port_info pi = sis_port_info;
const struct ata_port_info *ppi[] = { &pi, &pi };
struct ata_host *host;
@@ -202,8 +201,7 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u8 port2_start = 0x20;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -241,12 +239,12 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
if ((pmr & SIS_PMR_COMBINED) == 0) {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 180/181/964 chipset in SATA mode\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 180/181/964 chipset in SATA mode\n");
port2_start = 64;
} else {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 180/181 chipset in combined mode\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 180/181 chipset in combined mode\n");
port2_start = 0;
pi.flags |= ATA_FLAG_SLAVE_POSS;
}
@@ -256,24 +254,22 @@ static int sis_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
case 0x0183:
pci_read_config_dword(pdev, 0x6C, &val);
if (val & (1L << 31)) {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 182/965 chipset\n");
+ dev_info(&pdev->dev, "Detected SiS 182/965 chipset\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
} else {
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 182/965L chipset\n");
+ dev_info(&pdev->dev, "Detected SiS 182/965L chipset\n");
}
break;
case 0x1182:
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 1182/966/680 SATA controller\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 1182/966/680 SATA controller\n");
pi.flags |= ATA_FLAG_SLAVE_POSS;
break;
case 0x1183:
- dev_printk(KERN_INFO, &pdev->dev,
- "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
+ dev_info(&pdev->dev,
+ "Detected SiS 1183/966/966L/968/680 controller in PATA mode\n");
ppi[0] = &sis_info133_for_sata;
ppi[1] = &sis_info133_for_sata;
break;
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 84980ac..833607f 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -449,15 +449,13 @@ static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] =
{ &k2_port_info[ent->driver_data], NULL };
struct ata_host *host;
void __iomem *mmio_base;
int n_ports, i, rc, bar_pos;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
n_ports = 4;
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 8fd3b72..cdaebbe 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -1440,15 +1440,13 @@ static void pdc_20621_init(struct ata_host *host)
static int pdc_sata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] =
{ &pdc_port_info[ent->driver_data], NULL };
struct ata_host *host;
struct pdc_host_priv *hpriv;
int i, rc;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
index 235be71..b54ebfc 100644
--- a/drivers/ata/sata_uli.c
+++ b/drivers/ata/sata_uli.c
@@ -145,7 +145,6 @@ static int uli_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
const struct ata_port_info *ppi[] = { &uli_port_info, NULL };
unsigned int board_idx = (unsigned int) ent->driver_data;
struct ata_host *host;
@@ -154,8 +153,7 @@ static int uli_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_ioports *ioaddr;
int n_ports, rc;
- if (!printed_version++)
- dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
index 54434db..f93e43b 100644
--- a/drivers/ata/sata_via.c
+++ b/drivers/ata/sata_via.c
@@ -360,9 +360,9 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
online = (sstatus & 0xf) == 0x3;
- ata_port_printk(ap, KERN_INFO,
- "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
- online ? "up" : "down", sstatus, scontrol);
+ ata_port_info(ap,
+ "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
+ online ? "up" : "down", sstatus, scontrol);
/* SStatus is read one more time */
svia_scr_read(link, SCR_STATUS, &sstatus);
@@ -469,7 +469,7 @@ static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
return rc;
}
@@ -488,14 +488,14 @@ static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
if (!host) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
+ dev_err(&pdev->dev, "failed to allocate host\n");
return -ENOMEM;
}
rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to request/iomap "
- "PCI BARs (errno=%d)\n", rc);
+ dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
+ rc);
return rc;
}
host->iomap = pcim_iomap_table(pdev);
@@ -526,7 +526,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc) {
- dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
+ dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
return rc;
}
@@ -542,15 +542,14 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
u8 tmp8;
pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
- dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
- (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
+ dev_info(&pdev->dev, "routed to hard irq line %d\n",
+ (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);
/* make sure SATA channels are enabled */
pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "enabling SATA channels (0x%x)\n",
- (int) tmp8);
+ dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
+ (int)tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
}
@@ -558,9 +557,8 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
/* make sure interrupts for each channel sent to us */
pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "enabling SATA channel interrupts (0x%x)\n",
- (int) tmp8);
+ dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
+ (int) tmp8);
tmp8 |= ALL_PORTS;
pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
}
@@ -568,9 +566,9 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
/* make sure native mode is enabled */
pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
- dev_printk(KERN_DEBUG, &pdev->dev,
- "enabling SATA channel native mode (0x%x)\n",
- (int) tmp8);
+ dev_dbg(&pdev->dev,
+ "enabling SATA channel native mode (0x%x)\n",
+ (int) tmp8);
tmp8 |= NATIVE_MODE_ALL;
pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
}
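One behavioral nuance in the svia_configure() conversion: dev_printk(KERN_DEBUG, ...) always emits at debug level, while dev_dbg() compiles away entirely unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled, so these messages become opt-in. Roughly (simplified sketch with a hypothetical my_dev_dbg):

#include <linux/device.h>

#ifdef DEBUG
#define my_dev_dbg(dev, fmt, ...) \
	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
#else
/* Arguments are still type-checked, but no object code is emitted. */
#define my_dev_dbg(dev, fmt, ...) \
	do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
#endif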
@@ -606,15 +604,13 @@ static void svia_configure(struct pci_dev *pdev, int board_id)
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int printed_version;
unsigned int i;
int rc;
struct ata_host *host = NULL;
int board_id = (int) ent->driver_data;
const unsigned *bar_sizes;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
rc = pcim_enable_device(pdev);
if (rc)
@@ -628,7 +624,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
if ((pci_resource_start(pdev, i) == 0) ||
(pci_resource_len(pdev, i) < bar_sizes[i])) {
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_err(&pdev->dev,
"invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
i,
(unsigned long long)pci_resource_start(pdev, i),
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index 7c98737..6135a52 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -273,9 +273,8 @@ static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
if (unlikely(status == 0xffffffff || status == 0)) {
if (status)
- dev_printk(KERN_ERR, host->dev,
- ": IRQ status == 0xffffffff, "
- "PCI fault or device removal?\n");
+ dev_err(host->dev,
+ ": IRQ status == 0xffffffff, PCI fault or device removal?\n");
goto out;
}
@@ -347,14 +346,12 @@ static int __devinit vsc_sata_init_one(struct pci_dev *pdev,
.port_ops = &vsc_sata_ops,
};
const struct ata_port_info *ppi[] = { &pi, NULL };
- static int printed_version;
struct ata_host *host;
void __iomem *mmio_base;
int i, rc;
u8 cls;
- if (!printed_version++)
- dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
+ ata_print_version_once(&pdev->dev, DRV_VERSION);
/* allocate host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index a5fcb1e..89b30f3 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -38,7 +38,7 @@
#include <linux/ihex.h>
#include <linux/slab.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/byteorder.h>
@@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
}
// cast needed as there is no %? for pointer differences
PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
- skb, skb->head, (long) (skb_end_pointer(skb) - skb->head));
+ skb, skb->head, (long) skb_end_offset(skb));
rx.handle = virt_to_bus (skb);
rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
if (rx_give (dev, &rx, pool))
@@ -813,7 +813,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
return;
}
-// top up all RX pools (can also be called as a bottom half)
+// top up all RX pools
static void fill_rx_pools (amb_dev * dev) {
unsigned char pool;
@@ -872,11 +872,7 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id) {
++irq_work;
if (irq_work) {
-#ifdef FILL_RX_POOLS_IN_BH
- schedule_work (&dev->bh);
-#else
fill_rx_pools (dev);
-#endif
PRINTD (DBG_IRQ, "work done: %u", irq_work);
} else {
@@ -2154,11 +2150,6 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
dev->tx_avail = ATM_OC3_PCR;
dev->rx_avail = ATM_OC3_PCR;
-#ifdef FILL_RX_POOLS_IN_BH
- // initialise bottom half
- INIT_WORK(&dev->bh, (void (*)(void *)) fill_rx_pools, dev);
-#endif
-
// semaphore for txer/rxer modifications - we cannot use a
// spinlock as the critical region needs to switch processes
mutex_init(&dev->vcc_sf);
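In the ambassador hunks, the FILL_RX_POOLS_IN_BH branches go away because nothing ever defined that symbol, leaving the workqueue path as apparent dead code, and the open-coded pointer difference becomes skb_end_offset(). The two spellings are equivalent; a sketch of the accessor (hedged: the exact definition depends on whether the kernel stores skb->end as a pointer or as an offset via NET_SKBUFF_DATA_USES_OFFSET):

#include <linux/skbuff.h>

/* The skb data area spans [skb->head, skb_end_pointer(skb));
 * skb_end_offset() is its length in bytes, i.e. what the old code
 * computed as skb_end_pointer(skb) - skb->head. */
static inline unsigned int my_skb_end_offset(const struct sk_buff *skb)
{
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	return skb->end;		/* stored as an offset already */
#else
	return skb->end - skb->head;	/* stored as a pointer */
#endif
}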
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
index bd1c46a..aa97105 100644
--- a/drivers/atm/ambassador.h
+++ b/drivers/atm/ambassador.h
@@ -630,10 +630,6 @@ struct amb_dev {
u32 iobase;
u32 * membase;
-#ifdef FILL_RX_POOLS_IN_BH
- struct work_struct bh;
-#endif
-
amb_cq cq;
amb_txq txq;
amb_rxq rxq[NUM_RX_POOLS];
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 0b06250..b22d71c 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
extern int atm_init_aal5(struct atm_vcc *vcc); /* "raw" AAL5 transport */
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 3230ea0..956e9ac 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -21,7 +21,7 @@
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
@@ -1134,8 +1134,9 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
skb_headlen(skb));
else
put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
- skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size);
+ skb_frag_page(&skb_shinfo(skb)->frags[i]) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ skb_frag_size(&skb_shinfo(skb)->frags[i]));
}
if (skb->len & 3)
put_dma(tx->index,eni_dev->dma,&j,zeroes,4-(skb->len & 3));
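The eni change stops reaching into skb_frag_t fields directly and uses the skb_frag_page()/skb_frag_size() accessors instead, insulating the driver from future layout changes of the frag structure. The usual iteration pattern looks like this (sketch; my_walk_frags is illustrative):

#include <linux/skbuff.h>

static void my_walk_frags(const struct sk_buff *skb)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		struct page *page = skb_frag_page(frag);
		unsigned int len = skb_frag_size(frag);

		/* map or copy 'len' bytes of payload that live in
		 * 'page' at the frag's page offset */
		(void)page;
		(void)len;
	}
}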
diff --git a/drivers/atm/eni.h b/drivers/atm/eni.h
index e4c9525..dc9a62c 100644
--- a/drivers/atm/eni.h
+++ b/drivers/atm/eni.h
@@ -8,12 +8,13 @@
#include <linux/atm.h>
#include <linux/atmdev.h>
+#include <linux/interrupt.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "midway.h"
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index ef7a658..5072f8a 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -44,6 +44,7 @@
#include <linux/ioport.h> /* for request_region */
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/bitops.h>
#include <linux/slab.h>
@@ -51,7 +52,7 @@
#include <asm/system.h>
#include <asm/string.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/wait.h>
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc9e702..361f5ae 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -44,7 +44,7 @@
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_SBUS
#include <linux/of.h>
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index d58e3fc..b812103 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -38,13 +38,14 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1f8d724..81845fa 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -37,6 +37,7 @@
#include <linux/atm.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
@@ -45,7 +46,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#ifdef CONFIG_ATM_IDT77252_USE_SUNI
@@ -1257,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
tail = readl(SAR_REG_RAWCT);
pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
- skb_end_pointer(queue) - queue->head - 16,
+ skb_end_offset(queue) - 16,
PCI_DMA_FROMDEVICE);
while (head != tail) {
@@ -3415,27 +3416,28 @@ init_card(struct atm_dev *dev)
size = sizeof(struct vc_map *) * card->tct_size;
IPRINTK("%s: allocate %d byte for VC map.\n", card->name, size);
- if (NULL == (card->vcs = vmalloc(size))) {
+ card->vcs = vzalloc(size);
+ if (!card->vcs) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
}
- memset(card->vcs, 0, size);
size = sizeof(struct vc_map *) * card->scd_size;
IPRINTK("%s: allocate %d byte for SCD to VC mapping.\n",
card->name, size);
- if (NULL == (card->scd2vc = vmalloc(size))) {
+ card->scd2vc = vzalloc(size);
+ if (!card->scd2vc) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
}
- memset(card->scd2vc, 0, size);
size = sizeof(struct tst_info) * (card->tst_size - 2);
IPRINTK("%s: allocate %d byte for TST to VC mapping.\n",
card->name, size);
- if (NULL == (card->soft_tst = vmalloc(size))) {
+ card->soft_tst = vmalloc(size);
+ if (!card->soft_tst) {
printk("%s: memory allocation failure.\n", card->name);
deinit_card(card);
return -1;
@@ -3511,7 +3513,7 @@ init_card(struct atm_dev *dev)
tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
if (tmp) {
memcpy(card->atmdev->esi, tmp->dev_addr, 6);
-
+ dev_put(tmp);
printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
}
/*
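Two independent fixes sit in the idt77252 hunks: vzalloc() folds the vmalloc()+memset() pairs into one call (card->soft_tst stays plain vmalloc(), only its NULL-check style changes), and the net_device returned by dev_get_by_name() is now released. dev_get_by_name() hands the caller a reference that must be dropped with dev_put(); the required pairing, as a sketch:

#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/net_namespace.h>

static void my_copy_esi(unsigned char *esi, const char *ifname)
{
	struct net_device *dev = dev_get_by_name(&init_net, ifname);

	if (!dev)
		return;
	memcpy(esi, dev->dev_addr, 6);
	dev_put(dev);	/* drop the reference dev_get_by_name() took */
}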
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index dee4f01..3d0c2b0 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -53,11 +53,12 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>
@@ -817,127 +818,152 @@ static void ia_hw_type(IADEV *iadev) {
}
-static void IaFrontEndIntr(IADEV *iadev) {
- volatile IA_SUNI *suni;
- volatile ia_mb25_t *mb25;
- volatile suni_pm7345_t *suni_pm7345;
-
- if(iadev->phy_type & FE_25MBIT_PHY) {
- mb25 = (ia_mb25_t*)iadev->phy;
- iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
- } else if (iadev->phy_type & FE_DS3_PHY) {
- suni_pm7345 = (suni_pm7345_t *)iadev->phy;
- /* clear FRMR interrupts */
- (void) suni_pm7345->suni_ds3_frm_intr_stat;
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
- } else if (iadev->phy_type & FE_E3_PHY ) {
- suni_pm7345 = (suni_pm7345_t *)iadev->phy;
- (void) suni_pm7345->suni_e3_frm_maint_intr_ind;
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
- }
- else {
- suni = (IA_SUNI *)iadev->phy;
- (void) suni->suni_rsop_status;
- iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
- }
- if (iadev->carrier_detect)
- printk("IA: SUNI carrier detected\n");
- else
- printk("IA: SUNI carrier lost signal\n");
- return;
+static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
+{
+ return readl(ia->phy + (reg >> 2));
+}
+
+static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
+{
+ writel(val, ia->phy + (reg >> 2));
+}
+
+static void ia_frontend_intr(struct iadev_priv *iadev)
+{
+ u32 status;
+
+ if (iadev->phy_type & FE_25MBIT_PHY) {
+ status = ia_phy_read32(iadev, MB25_INTR_STATUS);
+ iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
+ } else if (iadev->phy_type & FE_DS3_PHY) {
+ ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
+ status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
+ iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
+ } else if (iadev->phy_type & FE_E3_PHY) {
+ ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
+ status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
+ iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
+ } else {
+ status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
+ iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
+ }
+
+ printk(KERN_INFO "IA: SUNI carrier %s\n",
+ iadev->carrier_detect ? "detected" : "lost signal");
}
-static void ia_mb25_init (IADEV *iadev)
+static void ia_mb25_init(struct iadev_priv *iadev)
{
- volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy;
#if 0
mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
- mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
- mb25->mb25_diag_control = 0;
- /*
- * Initialize carrier detect state
- */
- iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
- return;
-}
+ ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
+ ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
+
+ iadev->carrier_detect =
+ (ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
+}
-static void ia_suni_pm7345_init (IADEV *iadev)
+struct ia_reg {
+ u16 reg;
+ u16 val;
+};
+
+static void ia_phy_write(struct iadev_priv *iadev,
+ const struct ia_reg *regs, int len)
{
- volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
- if (iadev->phy_type & FE_DS3_PHY)
- {
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
- suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
- suni_pm7345->suni_ds3_frm_cfg = 1;
- suni_pm7345->suni_ds3_tran_cfg = 1;
- suni_pm7345->suni_config = 0;
- suni_pm7345->suni_splr_cfg = 0;
- suni_pm7345->suni_splt_cfg = 0;
- }
- else
- {
- iadev->carrier_detect =
- Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
- suni_pm7345->suni_e3_frm_fram_options = 0x4;
- suni_pm7345->suni_e3_frm_maint_options = 0x20;
- suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
- suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
- suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
- suni_pm7345->suni_e3_tran_fram_options = 0x1;
- suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
- suni_pm7345->suni_splr_cfg = 0x41;
- suni_pm7345->suni_splt_cfg = 0x41;
- }
- /*
- * Enable RSOP loss of signal interrupt.
- */
- suni_pm7345->suni_intr_enbl = 0x28;
-
- /*
- * Clear error counters
- */
- suni_pm7345->suni_id_reset = 0;
-
- /*
- * Clear "PMCTST" in master test register.
- */
- suni_pm7345->suni_master_test = 0;
-
- suni_pm7345->suni_rxcp_ctrl = 0x2c;
- suni_pm7345->suni_rxcp_fctrl = 0x81;
-
- suni_pm7345->suni_rxcp_idle_pat_h1 =
- suni_pm7345->suni_rxcp_idle_pat_h2 =
- suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
- suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
-
- suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
- suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
- suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
- suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
-
- suni_pm7345->suni_rxcp_cell_pat_h1 =
- suni_pm7345->suni_rxcp_cell_pat_h2 =
- suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
- suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
-
- suni_pm7345->suni_rxcp_cell_mask_h1 =
- suni_pm7345->suni_rxcp_cell_mask_h2 =
- suni_pm7345->suni_rxcp_cell_mask_h3 =
- suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
-
- suni_pm7345->suni_txcp_ctrl = 0xa4;
- suni_pm7345->suni_txcp_intr_en_sts = 0x10;
- suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
-
- suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
- SUNI_PM7345_CLB |
- SUNI_PM7345_DLB |
- SUNI_PM7345_PLB);
+ while (len--) {
+ ia_phy_write32(iadev, regs->reg, regs->val);
+ regs++;
+ }
+}
+
+static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
+{
+ static const struct ia_reg suni_ds3_init [] = {
+ { SUNI_DS3_FRM_INTR_ENBL, 0x17 },
+ { SUNI_DS3_FRM_CFG, 0x01 },
+ { SUNI_DS3_TRAN_CFG, 0x01 },
+ { SUNI_CONFIG, 0 },
+ { SUNI_SPLR_CFG, 0 },
+ { SUNI_SPLT_CFG, 0 }
+ };
+ u32 status;
+
+ status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
+ iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
+
+ ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
+}
+
+static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
+{
+ static const struct ia_reg suni_e3_init [] = {
+ { SUNI_E3_FRM_FRAM_OPTIONS, 0x04 },
+ { SUNI_E3_FRM_MAINT_OPTIONS, 0x20 },
+ { SUNI_E3_FRM_FRAM_INTR_ENBL, 0x1d },
+ { SUNI_E3_FRM_MAINT_INTR_ENBL, 0x30 },
+ { SUNI_E3_TRAN_STAT_DIAG_OPTIONS, 0 },
+ { SUNI_E3_TRAN_FRAM_OPTIONS, 0x01 },
+ { SUNI_CONFIG, SUNI_PM7345_E3ENBL },
+ { SUNI_SPLR_CFG, 0x41 },
+ { SUNI_SPLT_CFG, 0x41 }
+ };
+ u32 status;
+
+ status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
+ iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
+ ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
+}
+
+static void ia_suni_pm7345_init(struct iadev_priv *iadev)
+{
+ static const struct ia_reg suni_init [] = {
+ /* Enable RSOP loss of signal interrupt. */
+ { SUNI_INTR_ENBL, 0x28 },
+ /* Clear error counters. */
+ { SUNI_ID_RESET, 0 },
+ /* Clear "PMCTST" in master test register. */
+ { SUNI_MASTER_TEST, 0 },
+
+ { SUNI_RXCP_CTRL, 0x2c },
+ { SUNI_RXCP_FCTRL, 0x81 },
+
+ { SUNI_RXCP_IDLE_PAT_H1, 0 },
+ { SUNI_RXCP_IDLE_PAT_H2, 0 },
+ { SUNI_RXCP_IDLE_PAT_H3, 0 },
+ { SUNI_RXCP_IDLE_PAT_H4, 0x01 },
+
+ { SUNI_RXCP_IDLE_MASK_H1, 0xff },
+ { SUNI_RXCP_IDLE_MASK_H2, 0xff },
+ { SUNI_RXCP_IDLE_MASK_H3, 0xff },
+ { SUNI_RXCP_IDLE_MASK_H4, 0xfe },
+
+ { SUNI_RXCP_CELL_PAT_H1, 0 },
+ { SUNI_RXCP_CELL_PAT_H2, 0 },
+ { SUNI_RXCP_CELL_PAT_H3, 0 },
+ { SUNI_RXCP_CELL_PAT_H4, 0x01 },
+
+ { SUNI_RXCP_CELL_MASK_H1, 0xff },
+ { SUNI_RXCP_CELL_MASK_H2, 0xff },
+ { SUNI_RXCP_CELL_MASK_H3, 0xff },
+ { SUNI_RXCP_CELL_MASK_H4, 0xff },
+
+ { SUNI_TXCP_CTRL, 0xa4 },
+ { SUNI_TXCP_INTR_EN_STS, 0x10 },
+ { SUNI_TXCP_IDLE_PAT_H5, 0x55 }
+ };
+
+ if (iadev->phy_type & FE_DS3_PHY)
+ ia_suni_pm7345_init_ds3(iadev);
+ else
+ ia_suni_pm7345_init_e3(iadev);
+
+ ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
+
+ ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
+ ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
+ SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
@@ -1424,10 +1450,10 @@ static int rx_init(struct atm_dev *dev)
iadev->dma + IPHASE5575_RX_LIST_ADDR);
IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
iadev->dma+IPHASE5575_TX_LIST_ADDR,
- *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
+ readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
iadev->dma+IPHASE5575_RX_LIST_ADDR,
- *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
+ readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
writew(0, iadev->reass_reg+MODE_REG);
@@ -2207,7 +2233,7 @@ static irqreturn_t ia_int(int irq, void *dev_id)
if (status & STAT_DLERINT)
{
/* Clear this bit by writing a 1 to it. */
- *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
+ writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
rx_dle_intr(dev);
}
if (status & STAT_SEGINT)
@@ -2218,13 +2244,13 @@ static irqreturn_t ia_int(int irq, void *dev_id)
}
if (status & STAT_DLETINT)
{
- *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
+ writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
tx_dle_intr(dev);
}
if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
{
if (status & STAT_FEINT)
- IaFrontEndIntr(iadev);
+ ia_frontend_intr(iadev);
}
}
return IRQ_RETVAL(handled);
@@ -2555,7 +2581,7 @@ static int __devinit ia_start(struct atm_dev *dev)
goto err_free_rx;
}
/* Get iadev->carrier_detect status */
- IaFrontEndIntr(iadev);
+ ia_frontend_intr(iadev);
}
return 0;
@@ -2826,7 +2852,7 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
case 0xb:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
- IaFrontEndIntr(iadev);
+ ia_frontend_intr(iadev);
break;
case 0xa:
if (!capable(CAP_NET_ADMIN)) return -EPERM;
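The core of the iphase rework: MMIO registers were previously modeled as volatile struct overlays (IA_SUNI, ia_mb25_t, suni_pm7345_t) and dereferenced directly; they are now byte-offset enums accessed through readl()/writel() helpers, which provide the ordering and byte-swapping guarantees that raw volatile loads and stores do not. Since iadev->phy is declared u32 __iomem *, the helpers convert a byte offset to a u32 index with reg >> 2. The pattern in isolation (sketch with hypothetical names):

#include <linux/io.h>
#include <linux/types.h>

struct my_phy {
	u32 __iomem *base;	/* window of 32-bit registers */
};

/* 'reg' is a byte offset, as in the SUNI and MB25 enums; the base
 * pointer is u32-typed, so scale the offset down by 4. */
static u32 my_phy_read32(struct my_phy *p, unsigned int reg)
{
	return readl(p->base + (reg >> 2));
}

static void my_phy_write32(struct my_phy *p, unsigned int reg, u32 val)
{
	writel(val, p->base + (reg >> 2));
}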
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index b2778e7..53ecac5 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -889,79 +889,71 @@ typedef struct ia_rtn_q {
} IARTN_Q;
#define SUNI_LOSV 0x04
-typedef struct {
- u32 suni_master_reset; /* SUNI Master Reset and Identity */
- u32 suni_master_config; /* SUNI Master Configuration */
- u32 suni_master_intr_stat; /* SUNI Master Interrupt Status */
- u32 suni_reserved1; /* Reserved */
- u32 suni_master_clk_monitor;/* SUNI Master Clock Monitor */
- u32 suni_master_control; /* SUNI Master Clock Monitor */
- u32 suni_reserved2[10]; /* Reserved */
-
- u32 suni_rsop_control; /* RSOP Control/Interrupt Enable */
- u32 suni_rsop_status; /* RSOP Status/Interrupt States */
- u32 suni_rsop_section_bip8l;/* RSOP Section BIP-8 LSB */
- u32 suni_rsop_section_bip8m;/* RSOP Section BIP-8 MSB */
-
- u32 suni_tsop_control; /* TSOP Control */
- u32 suni_tsop_diag; /* TSOP Disgnostics */
- u32 suni_tsop_reserved[2]; /* TSOP Reserved */
-
- u32 suni_rlop_cs; /* RLOP Control/Status */
- u32 suni_rlop_intr; /* RLOP Interrupt Enable/Status */
- u32 suni_rlop_line_bip24l; /* RLOP Line BIP-24 LSB */
- u32 suni_rlop_line_bip24; /* RLOP Line BIP-24 */
- u32 suni_rlop_line_bip24m; /* RLOP Line BIP-24 MSB */
- u32 suni_rlop_line_febel; /* RLOP Line FEBE LSB */
- u32 suni_rlop_line_febe; /* RLOP Line FEBE */
- u32 suni_rlop_line_febem; /* RLOP Line FEBE MSB */
-
- u32 suni_tlop_control; /* TLOP Control */
- u32 suni_tlop_disg; /* TLOP Disgnostics */
- u32 suni_tlop_reserved[14]; /* TLOP Reserved */
-
- u32 suni_rpop_cs; /* RPOP Status/Control */
- u32 suni_rpop_intr; /* RPOP Interrupt/Status */
- u32 suni_rpop_reserved; /* RPOP Reserved */
- u32 suni_rpop_intr_ena; /* RPOP Interrupt Enable */
- u32 suni_rpop_reserved1[3]; /* RPOP Reserved */
- u32 suni_rpop_path_sig; /* RPOP Path Signal Label */
- u32 suni_rpop_bip8l; /* RPOP Path BIP-8 LSB */
- u32 suni_rpop_bip8m; /* RPOP Path BIP-8 MSB */
- u32 suni_rpop_febel; /* RPOP Path FEBE LSB */
- u32 suni_rpop_febem; /* RPOP Path FEBE MSB */
- u32 suni_rpop_reserved2[4]; /* RPOP Reserved */
-
- u32 suni_tpop_cntrl_daig; /* TPOP Control/Disgnostics */
- u32 suni_tpop_pointer_ctrl; /* TPOP Pointer Control */
- u32 suni_tpop_sourcer_ctrl; /* TPOP Source Control */
- u32 suni_tpop_reserved1[2]; /* TPOP Reserved */
- u32 suni_tpop_arb_prtl; /* TPOP Arbitrary Pointer LSB */
- u32 suni_tpop_arb_prtm; /* TPOP Arbitrary Pointer MSB */
- u32 suni_tpop_reserved2; /* TPOP Reserved */
- u32 suni_tpop_path_sig; /* TPOP Path Signal Lable */
- u32 suni_tpop_path_status; /* TPOP Path Status */
- u32 suni_tpop_reserved3[6]; /* TPOP Reserved */
-
- u32 suni_racp_cs; /* RACP Control/Status */
- u32 suni_racp_intr; /* RACP Interrupt Enable/Status */
- u32 suni_racp_hdr_pattern; /* RACP Match Header Pattern */
- u32 suni_racp_hdr_mask; /* RACP Match Header Mask */
- u32 suni_racp_corr_hcs; /* RACP Correctable HCS Error Count */
- u32 suni_racp_uncorr_hcs; /* RACP Uncorrectable HCS Error Count */
- u32 suni_racp_reserved[10]; /* RACP Reserved */
-
- u32 suni_tacp_control; /* TACP Control */
- u32 suni_tacp_idle_hdr_pat; /* TACP Idle Cell Header Pattern */
- u32 suni_tacp_idle_pay_pay; /* TACP Idle Cell Payld Octet Pattern */
- u32 suni_tacp_reserved[5]; /* TACP Reserved */
-
- u32 suni_reserved3[24]; /* Reserved */
-
- u32 suni_master_test; /* SUNI Master Test */
- u32 suni_reserved_test; /* SUNI Reserved for Test */
-} IA_SUNI;
-
+enum ia_suni {
+ SUNI_MASTER_RESET = 0x000, /* SUNI Master Reset and Identity */
+ SUNI_MASTER_CONFIG = 0x004, /* SUNI Master Configuration */
+ SUNI_MASTER_INTR_STAT = 0x008, /* SUNI Master Interrupt Status */
+ SUNI_RESERVED1 = 0x00c, /* Reserved */
+ SUNI_MASTER_CLK_MONITOR = 0x010, /* SUNI Master Clock Monitor */
+ SUNI_MASTER_CONTROL = 0x014, /* SUNI Master Control */
+ /* Reserved (10) */
+ SUNI_RSOP_CONTROL = 0x040, /* RSOP Control/Interrupt Enable */
+ SUNI_RSOP_STATUS = 0x044, /* RSOP Status/Interrupt States */
+ SUNI_RSOP_SECTION_BIP8L = 0x048, /* RSOP Section BIP-8 LSB */
+ SUNI_RSOP_SECTION_BIP8M = 0x04c, /* RSOP Section BIP-8 MSB */
+
+ SUNI_TSOP_CONTROL = 0x050, /* TSOP Control */
+ SUNI_TSOP_DIAG = 0x054, /* TSOP Diagnostics */
+ /* Reserved (2) */
+ SUNI_RLOP_CS = 0x060, /* RLOP Control/Status */
+ SUNI_RLOP_INTR = 0x064, /* RLOP Interrupt Enable/Status */
+ SUNI_RLOP_LINE_BIP24L = 0x068, /* RLOP Line BIP-24 LSB */
+ SUNI_RLOP_LINE_BIP24 = 0x06c, /* RLOP Line BIP-24 */
+ SUNI_RLOP_LINE_BIP24M = 0x070, /* RLOP Line BIP-24 MSB */
+ SUNI_RLOP_LINE_FEBEL = 0x074, /* RLOP Line FEBE LSB */
+ SUNI_RLOP_LINE_FEBE = 0x078, /* RLOP Line FEBE */
+ SUNI_RLOP_LINE_FEBEM = 0x07c, /* RLOP Line FEBE MSB */
+
+ SUNI_TLOP_CONTROL = 0x080, /* TLOP Control */
+ SUNI_TLOP_DISG = 0x084, /* TLOP Diagnostics */
+ /* Reserved (14) */
+ SUNI_RPOP_CS = 0x0c0, /* RPOP Status/Control */
+ SUNI_RPOP_INTR = 0x0c4, /* RPOP Interrupt/Status */
+ SUNI_RPOP_RESERVED = 0x0c8, /* RPOP Reserved */
+ SUNI_RPOP_INTR_ENA = 0x0cc, /* RPOP Interrupt Enable */
+ /* Reserved (3) */
+ SUNI_RPOP_PATH_SIG = 0x0dc, /* RPOP Path Signal Label */
+ SUNI_RPOP_BIP8L = 0x0e0, /* RPOP Path BIP-8 LSB */
+ SUNI_RPOP_BIP8M = 0x0e4, /* RPOP Path BIP-8 MSB */
+ SUNI_RPOP_FEBEL = 0x0e8, /* RPOP Path FEBE LSB */
+ SUNI_RPOP_FEBEM = 0x0ec, /* RPOP Path FEBE MSB */
+ /* Reserved (4) */
+ SUNI_TPOP_CNTRL_DAIG = 0x100, /* TPOP Control/Diagnostics */
+ SUNI_TPOP_POINTER_CTRL = 0x104, /* TPOP Pointer Control */
+ SUNI_TPOP_SOURCER_CTRL = 0x108, /* TPOP Source Control */
+ /* Reserved (2) */
+ SUNI_TPOP_ARB_PRTL = 0x114, /* TPOP Arbitrary Pointer LSB */
+ SUNI_TPOP_ARB_PRTM = 0x118, /* TPOP Arbitrary Pointer MSB */
+ SUNI_TPOP_RESERVED2 = 0x11c, /* TPOP Reserved */
+ SUNI_TPOP_PATH_SIG = 0x120, /* TPOP Path Signal Label */
+ SUNI_TPOP_PATH_STATUS = 0x124, /* TPOP Path Status */
+ /* Reserved (6) */
+ SUNI_RACP_CS = 0x140, /* RACP Control/Status */
+ SUNI_RACP_INTR = 0x144, /* RACP Interrupt Enable/Status */
+ SUNI_RACP_HDR_PATTERN = 0x148, /* RACP Match Header Pattern */
+ SUNI_RACP_HDR_MASK = 0x14c, /* RACP Match Header Mask */
+ SUNI_RACP_CORR_HCS = 0x150, /* RACP Correctable HCS Error Count */
+ SUNI_RACP_UNCORR_HCS = 0x154, /* RACP Uncorrectable HCS Err Count */
+ /* Reserved (10) */
+ SUNI_TACP_CONTROL = 0x180, /* TACP Control */
+ SUNI_TACP_IDLE_HDR_PAT = 0x184, /* TACP Idle Cell Header Pattern */
+ SUNI_TACP_IDLE_PAY_PAY = 0x188, /* TACP Idle Cell Payld Octet Patrn */
+ /* Reserved (5) */
+ /* Reserved (24) */
+ /* FIXME: unused but name conflicts.
+ * SUNI_MASTER_TEST = 0x200, SUNI Master Test */
+ SUNI_RESERVED_TEST = 0x204 /* SUNI Reserved for Test */
+};
typedef struct _SUNI_STATS_
{
@@ -993,13 +985,11 @@ typedef struct _SUNI_STATS_
u32 racp_uchcs_count; // uncorrectable HCS error count
} IA_SUNI_STATS;
-typedef struct iadev_t {
+typedef struct iadev_priv {
/*-----base pointers into (i)chipSAR+ address space */
- u32 __iomem *phy; /* base pointer into phy(SUNI) */
- u32 __iomem *dma; /* base pointer into DMA control
- registers */
- u32 __iomem *reg; /* base pointer to SAR registers
- - Bus Interface Control Regs */
+ u32 __iomem *phy; /* Base pointer into phy (SUNI). */
+ u32 __iomem *dma; /* Base pointer into DMA control registers. */
+ u32 __iomem *reg; /* Base pointer to SAR registers. */
u32 __iomem *seg_reg; /* base pointer to segmentation engine
internal registers */
u32 __iomem *reass_reg; /* base pointer to reassemble engine
@@ -1071,14 +1061,14 @@ typedef struct iadev_t {
#define INPH_IA_VCC(v) ((struct ia_vcc *) (v)->dev_data)
/******************* IDT77105 25MB/s PHY DEFINE *****************************/
-typedef struct {
- u_int mb25_master_ctrl; /* Master control */
- u_int mb25_intr_status; /* Interrupt status */
- u_int mb25_diag_control; /* Diagnostic control */
- u_int mb25_led_hec; /* LED driver and HEC status/control */
- u_int mb25_low_byte_counter; /* Low byte counter */
- u_int mb25_high_byte_counter; /* High byte counter */
-} ia_mb25_t;
+enum ia_mb25 {
+ MB25_MASTER_CTRL = 0x00, /* Master control */
+ MB25_INTR_STATUS = 0x04, /* Interrupt status */
+ MB25_DIAG_CONTROL = 0x08, /* Diagnostic control */
+ MB25_LED_HEC = 0x0c, /* LED driver and HEC status/control */
+ MB25_LOW_BYTE_COUNTER = 0x10,
+ MB25_HIGH_BYTE_COUNTER = 0x14
+};
/*
* Master Control
@@ -1127,122 +1117,121 @@ typedef struct {
#define FE_E3_PHY 0x0090 /* E3 */
/*********************** SUNI_PM7345 PHY DEFINE HERE *********************/
-typedef struct _suni_pm7345_t
-{
- u_int suni_config; /* SUNI Configuration */
- u_int suni_intr_enbl; /* SUNI Interrupt Enable */
- u_int suni_intr_stat; /* SUNI Interrupt Status */
- u_int suni_control; /* SUNI Control */
- u_int suni_id_reset; /* SUNI Reset and Identity */
- u_int suni_data_link_ctrl;
- u_int suni_rboc_conf_intr_enbl;
- u_int suni_rboc_stat;
- u_int suni_ds3_frm_cfg;
- u_int suni_ds3_frm_intr_enbl;
- u_int suni_ds3_frm_intr_stat;
- u_int suni_ds3_frm_stat;
- u_int suni_rfdl_cfg;
- u_int suni_rfdl_enbl_stat;
- u_int suni_rfdl_stat;
- u_int suni_rfdl_data;
- u_int suni_pmon_chng;
- u_int suni_pmon_intr_enbl_stat;
- u_int suni_reserved1[0x13-0x11];
- u_int suni_pmon_lcv_evt_cnt_lsb;
- u_int suni_pmon_lcv_evt_cnt_msb;
- u_int suni_pmon_fbe_evt_cnt_lsb;
- u_int suni_pmon_fbe_evt_cnt_msb;
- u_int suni_pmon_sez_det_cnt_lsb;
- u_int suni_pmon_sez_det_cnt_msb;
- u_int suni_pmon_pe_evt_cnt_lsb;
- u_int suni_pmon_pe_evt_cnt_msb;
- u_int suni_pmon_ppe_evt_cnt_lsb;
- u_int suni_pmon_ppe_evt_cnt_msb;
- u_int suni_pmon_febe_evt_cnt_lsb;
- u_int suni_pmon_febe_evt_cnt_msb;
- u_int suni_ds3_tran_cfg;
- u_int suni_ds3_tran_diag;
- u_int suni_reserved2[0x23-0x21];
- u_int suni_xfdl_cfg;
- u_int suni_xfdl_intr_st;
- u_int suni_xfdl_xmit_data;
- u_int suni_xboc_code;
- u_int suni_splr_cfg;
- u_int suni_splr_intr_en;
- u_int suni_splr_intr_st;
- u_int suni_splr_status;
- u_int suni_splt_cfg;
- u_int suni_splt_cntl;
- u_int suni_splt_diag_g1;
- u_int suni_splt_f1;
- u_int suni_cppm_loc_meters;
- u_int suni_cppm_chng_of_cppm_perf_meter;
- u_int suni_cppm_b1_err_cnt_lsb;
- u_int suni_cppm_b1_err_cnt_msb;
- u_int suni_cppm_framing_err_cnt_lsb;
- u_int suni_cppm_framing_err_cnt_msb;
- u_int suni_cppm_febe_cnt_lsb;
- u_int suni_cppm_febe_cnt_msb;
- u_int suni_cppm_hcs_err_cnt_lsb;
- u_int suni_cppm_hcs_err_cnt_msb;
- u_int suni_cppm_idle_un_cell_cnt_lsb;
- u_int suni_cppm_idle_un_cell_cnt_msb;
- u_int suni_cppm_rcv_cell_cnt_lsb;
- u_int suni_cppm_rcv_cell_cnt_msb;
- u_int suni_cppm_xmit_cell_cnt_lsb;
- u_int suni_cppm_xmit_cell_cnt_msb;
- u_int suni_rxcp_ctrl;
- u_int suni_rxcp_fctrl;
- u_int suni_rxcp_intr_en_sts;
- u_int suni_rxcp_idle_pat_h1;
- u_int suni_rxcp_idle_pat_h2;
- u_int suni_rxcp_idle_pat_h3;
- u_int suni_rxcp_idle_pat_h4;
- u_int suni_rxcp_idle_mask_h1;
- u_int suni_rxcp_idle_mask_h2;
- u_int suni_rxcp_idle_mask_h3;
- u_int suni_rxcp_idle_mask_h4;
- u_int suni_rxcp_cell_pat_h1;
- u_int suni_rxcp_cell_pat_h2;
- u_int suni_rxcp_cell_pat_h3;
- u_int suni_rxcp_cell_pat_h4;
- u_int suni_rxcp_cell_mask_h1;
- u_int suni_rxcp_cell_mask_h2;
- u_int suni_rxcp_cell_mask_h3;
- u_int suni_rxcp_cell_mask_h4;
- u_int suni_rxcp_hcs_cs;
- u_int suni_rxcp_lcd_cnt_threshold;
- u_int suni_reserved3[0x57-0x54];
- u_int suni_txcp_ctrl;
- u_int suni_txcp_intr_en_sts;
- u_int suni_txcp_idle_pat_h1;
- u_int suni_txcp_idle_pat_h2;
- u_int suni_txcp_idle_pat_h3;
- u_int suni_txcp_idle_pat_h4;
- u_int suni_txcp_idle_pat_h5;
- u_int suni_txcp_idle_payload;
- u_int suni_e3_frm_fram_options;
- u_int suni_e3_frm_maint_options;
- u_int suni_e3_frm_fram_intr_enbl;
- u_int suni_e3_frm_fram_intr_ind_stat;
- u_int suni_e3_frm_maint_intr_enbl;
- u_int suni_e3_frm_maint_intr_ind;
- u_int suni_e3_frm_maint_stat;
- u_int suni_reserved4;
- u_int suni_e3_tran_fram_options;
- u_int suni_e3_tran_stat_diag_options;
- u_int suni_e3_tran_bip_8_err_mask;
- u_int suni_e3_tran_maint_adapt_options;
- u_int suni_ttb_ctrl;
- u_int suni_ttb_trail_trace_id_stat;
- u_int suni_ttb_ind_addr;
- u_int suni_ttb_ind_data;
- u_int suni_ttb_exp_payload_type;
- u_int suni_ttb_payload_type_ctrl_stat;
- u_int suni_pad5[0x7f-0x71];
- u_int suni_master_test;
- u_int suni_pad6[0xff-0x80];
-}suni_pm7345_t;
+enum suni_pm7345 {
+ SUNI_CONFIG = 0x000, /* SUNI Configuration */
+ SUNI_INTR_ENBL = 0x004, /* SUNI Interrupt Enable */
+ SUNI_INTR_STAT = 0x008, /* SUNI Interrupt Status */
+ SUNI_CONTROL = 0x00c, /* SUNI Control */
+ SUNI_ID_RESET = 0x010, /* SUNI Reset and Identity */
+ SUNI_DATA_LINK_CTRL = 0x014,
+ SUNI_RBOC_CONF_INTR_ENBL = 0x018,
+ SUNI_RBOC_STAT = 0x01c,
+ SUNI_DS3_FRM_CFG = 0x020,
+ SUNI_DS3_FRM_INTR_ENBL = 0x024,
+ SUNI_DS3_FRM_INTR_STAT = 0x028,
+ SUNI_DS3_FRM_STAT = 0x02c,
+ SUNI_RFDL_CFG = 0x030,
+ SUNI_RFDL_ENBL_STAT = 0x034,
+ SUNI_RFDL_STAT = 0x038,
+ SUNI_RFDL_DATA = 0x03c,
+ SUNI_PMON_CHNG = 0x040,
+ SUNI_PMON_INTR_ENBL_STAT = 0x044,
+ /* SUNI_RESERVED1 (0x13 - 0x11) */
+ SUNI_PMON_LCV_EVT_CNT_LSB = 0x050,
+ SUNI_PMON_LCV_EVT_CNT_MSB = 0x054,
+ SUNI_PMON_FBE_EVT_CNT_LSB = 0x058,
+ SUNI_PMON_FBE_EVT_CNT_MSB = 0x05c,
+ SUNI_PMON_SEZ_DET_CNT_LSB = 0x060,
+ SUNI_PMON_SEZ_DET_CNT_MSB = 0x064,
+ SUNI_PMON_PE_EVT_CNT_LSB = 0x068,
+ SUNI_PMON_PE_EVT_CNT_MSB = 0x06c,
+ SUNI_PMON_PPE_EVT_CNT_LSB = 0x070,
+ SUNI_PMON_PPE_EVT_CNT_MSB = 0x074,
+ SUNI_PMON_FEBE_EVT_CNT_LSB = 0x078,
+ SUNI_PMON_FEBE_EVT_CNT_MSB = 0x07c,
+ SUNI_DS3_TRAN_CFG = 0x080,
+ SUNI_DS3_TRAN_DIAG = 0x084,
+ /* SUNI_RESERVED2 (0x23 - 0x21) */
+ SUNI_XFDL_CFG = 0x090,
+ SUNI_XFDL_INTR_ST = 0x094,
+ SUNI_XFDL_XMIT_DATA = 0x098,
+ SUNI_XBOC_CODE = 0x09c,
+ SUNI_SPLR_CFG = 0x0a0,
+ SUNI_SPLR_INTR_EN = 0x0a4,
+ SUNI_SPLR_INTR_ST = 0x0a8,
+ SUNI_SPLR_STATUS = 0x0ac,
+ SUNI_SPLT_CFG = 0x0b0,
+ SUNI_SPLT_CNTL = 0x0b4,
+ SUNI_SPLT_DIAG_G1 = 0x0b8,
+ SUNI_SPLT_F1 = 0x0bc,
+ SUNI_CPPM_LOC_METERS = 0x0c0,
+ SUNI_CPPM_CHG_OF_CPPM_PERF_METR = 0x0c4,
+ SUNI_CPPM_B1_ERR_CNT_LSB = 0x0c8,
+ SUNI_CPPM_B1_ERR_CNT_MSB = 0x0cc,
+ SUNI_CPPM_FRAMING_ERR_CNT_LSB = 0x0d0,
+ SUNI_CPPM_FRAMING_ERR_CNT_MSB = 0x0d4,
+ SUNI_CPPM_FEBE_CNT_LSB = 0x0d8,
+ SUNI_CPPM_FEBE_CNT_MSB = 0x0dc,
+ SUNI_CPPM_HCS_ERR_CNT_LSB = 0x0e0,
+ SUNI_CPPM_HCS_ERR_CNT_MSB = 0x0e4,
+ SUNI_CPPM_IDLE_UN_CELL_CNT_LSB = 0x0e8,
+ SUNI_CPPM_IDLE_UN_CELL_CNT_MSB = 0x0ec,
+ SUNI_CPPM_RCV_CELL_CNT_LSB = 0x0f0,
+ SUNI_CPPM_RCV_CELL_CNT_MSB = 0x0f4,
+ SUNI_CPPM_XMIT_CELL_CNT_LSB = 0x0f8,
+ SUNI_CPPM_XMIT_CELL_CNT_MSB = 0x0fc,
+ SUNI_RXCP_CTRL = 0x100,
+ SUNI_RXCP_FCTRL = 0x104,
+ SUNI_RXCP_INTR_EN_STS = 0x108,
+ SUNI_RXCP_IDLE_PAT_H1 = 0x10c,
+ SUNI_RXCP_IDLE_PAT_H2 = 0x110,
+ SUNI_RXCP_IDLE_PAT_H3 = 0x114,
+ SUNI_RXCP_IDLE_PAT_H4 = 0x118,
+ SUNI_RXCP_IDLE_MASK_H1 = 0x11c,
+ SUNI_RXCP_IDLE_MASK_H2 = 0x120,
+ SUNI_RXCP_IDLE_MASK_H3 = 0x124,
+ SUNI_RXCP_IDLE_MASK_H4 = 0x128,
+ SUNI_RXCP_CELL_PAT_H1 = 0x12c,
+ SUNI_RXCP_CELL_PAT_H2 = 0x130,
+ SUNI_RXCP_CELL_PAT_H3 = 0x134,
+ SUNI_RXCP_CELL_PAT_H4 = 0x138,
+ SUNI_RXCP_CELL_MASK_H1 = 0x13c,
+ SUNI_RXCP_CELL_MASK_H2 = 0x140,
+ SUNI_RXCP_CELL_MASK_H3 = 0x144,
+ SUNI_RXCP_CELL_MASK_H4 = 0x148,
+ SUNI_RXCP_HCS_CS = 0x14c,
+ SUNI_RXCP_LCD_CNT_THRESHOLD = 0x150,
+ /* SUNI_RESERVED3 (0x57 - 0x54) */
+ SUNI_TXCP_CTRL = 0x160,
+ SUNI_TXCP_INTR_EN_STS = 0x164,
+ SUNI_TXCP_IDLE_PAT_H1 = 0x168,
+ SUNI_TXCP_IDLE_PAT_H2 = 0x16c,
+ SUNI_TXCP_IDLE_PAT_H3 = 0x170,
+ SUNI_TXCP_IDLE_PAT_H4 = 0x174,
+ SUNI_TXCP_IDLE_PAT_H5 = 0x178,
+ SUNI_TXCP_IDLE_PAYLOAD = 0x17c,
+ SUNI_E3_FRM_FRAM_OPTIONS = 0x180,
+ SUNI_E3_FRM_MAINT_OPTIONS = 0x184,
+ SUNI_E3_FRM_FRAM_INTR_ENBL = 0x188,
+ SUNI_E3_FRM_FRAM_INTR_IND_STAT = 0x18c,
+ SUNI_E3_FRM_MAINT_INTR_ENBL = 0x190,
+ SUNI_E3_FRM_MAINT_INTR_IND = 0x194,
+ SUNI_E3_FRM_MAINT_STAT = 0x198,
+ SUNI_RESERVED4 = 0x19c,
+ SUNI_E3_TRAN_FRAM_OPTIONS = 0x1a0,
+ SUNI_E3_TRAN_STAT_DIAG_OPTIONS = 0x1a4,
+ SUNI_E3_TRAN_BIP_8_ERR_MASK = 0x1a8,
+ SUNI_E3_TRAN_MAINT_ADAPT_OPTS = 0x1ac,
+ SUNI_TTB_CTRL = 0x1b0,
+ SUNI_TTB_TRAIL_TRACE_ID_STAT = 0x1b4,
+ SUNI_TTB_IND_ADDR = 0x1b8,
+ SUNI_TTB_IND_DATA = 0x1bc,
+ SUNI_TTB_EXP_PAYLOAD_TYPE = 0x1c0,
+ SUNI_TTB_PAYLOAD_TYPE_CTRL_STAT = 0x1c4,
+ /* SUNI_PAD5 (0x7f - 0x71) */
+ SUNI_MASTER_TEST = 0x200,
+ /* SUNI_PAD6 (0xff - 0x80) */
+};
#define SUNI_PM7345_T suni_pm7345_t
#define SUNI_PM7345 0x20 /* Suni chip type */
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 4e8ba56..f556969 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1457,10 +1457,9 @@ static int __devinit vcc_table_allocate(struct lanai_dev *lanai)
return (lanai->vccs == NULL) ? -ENOMEM : 0;
#else
int bytes = (lanai->num_vci) * sizeof(struct lanai_vcc *);
- lanai->vccs = (struct lanai_vcc **) vmalloc(bytes);
+ lanai->vccs = vzalloc(bytes);
if (unlikely(lanai->vccs == NULL))
return -ENOMEM;
- memset(lanai->vccs, 0, bytes);
return 0;
#endif
}
@@ -1947,7 +1946,6 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai)
{
struct pci_dev *pci = lanai->pci;
int result;
- u16 w;
if (pci_enable_device(pci) != 0) {
printk(KERN_ERR DEV_LABEL "(itf %d): can't enable "
@@ -1965,13 +1963,7 @@ static int __devinit lanai_pci_start(struct lanai_dev *lanai)
"(itf %d): No suitable DMA available.\n", lanai->number);
return -EBUSY;
}
- result = pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &w);
- if (result != PCIBIOS_SUCCESSFUL) {
- printk(KERN_ERR DEV_LABEL "(itf %d): can't read "
- "PCI_SUBSYSTEM_ID: %d\n", lanai->number, result);
- return -EINVAL;
- }
- result = check_board_id_and_rev("PCI", w, NULL);
+ result = check_board_id_and_rev("PCI", pci->subsystem_device, NULL);
if (result != 0)
return result;
/* Set latency timer to zero as per lanai docs */
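The lanai hunk drops a config-space round trip: the PCI core reads PCI_SUBSYSTEM_ID once at enumeration time and caches it in struct pci_dev, so the driver can use pdev->subsystem_device directly and the error path for a failed read disappears. Sketch (my_check_board and the ID value are hypothetical):

#include <linux/pci.h>

static int my_check_board(struct pci_dev *pdev)
{
	/* subsystem_vendor/subsystem_device were filled in by the
	 * PCI core during enumeration; no pci_read_config_word()
	 * round trip (or its error handling) is needed. */
	if (pdev->subsystem_device != 0x0001)	/* hypothetical board ID */
		return -ENODEV;
	return 0;
}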
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6b313ee..1c70c45 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -51,7 +51,7 @@
#include <linux/idr.h>
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index 41c56ea..90f1ccc 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -25,7 +25,7 @@
#include <asm/system.h>
#include <asm/param.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "suni.h"
diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
index c45ae05..5120a96 100644
--- a/drivers/atm/uPD98402.c
+++ b/drivers/atm/uPD98402.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "uPD98402.h"
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 6249179..d889f56 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/atm_zatm.h>
#include <linux/capability.h>
@@ -26,7 +27,7 @@
#include <asm/system.h>
#include <asm/string.h>
#include <asm/io.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/uaccess.h>
#include "uPD98401.h"
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 5b93852..0d75285 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
+ parport_put_port(ks0108_parport);
if (ks0108_pardevice == NULL) {
printk(KERN_ERR KS0108_NAME ": ERROR: "
"parport didn't register new device\n");
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 353781b..c1172da 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -13,6 +13,11 @@ config BCMA
Bus driver for Broadcom specific Advanced Microcontroller Bus
Architecture.
+# Support for Block-I/O. SELECT this from the driver that needs it.
+config BCMA_BLOCKIO
+ bool
+ depends on BCMA
+
config BCMA_HOST_PCI_POSSIBLE
bool
depends on BCMA && PCI = y
@@ -22,6 +27,25 @@ config BCMA_HOST_PCI
bool "Support for BCMA on PCI-host bus"
depends on BCMA_HOST_PCI_POSSIBLE
+config BCMA_DRIVER_PCI_HOSTMODE
+ bool "Driver for PCI core working in hostmode"
+ depends on BCMA && MIPS
+ help
+ PCI core hostmode operation (external PCI bus).
+
+config BCMA_HOST_SOC
+ bool
+ depends on BCMA_DRIVER_MIPS
+
+config BCMA_DRIVER_MIPS
+ bool "BCMA Broadcom MIPS core driver"
+ depends on BCMA && MIPS
+ help
+ Driver for the Broadcom MIPS core attached to Broadcom specific
+ Advanced Microcontroller Bus.
+
+ If unsure, say N.
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 0d56245..82de24e 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,7 +1,10 @@
-bcma-y += main.o scan.o core.o
+bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
bcma-y += driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
+bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
+bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
ccflags-$(CONFIG_BCMA_DEBUG) := -DDEBUG
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 2f72e9c..fda56bd 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -13,11 +13,33 @@
struct bcma_bus;
/* main.c */
-extern int bcma_bus_register(struct bcma_bus *bus);
-extern void bcma_bus_unregister(struct bcma_bus *bus);
+int bcma_bus_register(struct bcma_bus *bus);
+void bcma_bus_unregister(struct bcma_bus *bus);
+int __init bcma_bus_early_register(struct bcma_bus *bus,
+ struct bcma_device *core_cc,
+ struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
+int __init bcma_bus_scan_early(struct bcma_bus *bus,
+ struct bcma_device_id *match,
+ struct bcma_device *core);
+void bcma_init_bus(struct bcma_bus *bus);
+
+/* sprom.c */
+int bcma_sprom_get(struct bcma_bus *bus);
+
+/* driver_chipcommon.c */
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+/* driver_chipcommon_pmu.c */
+u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc);
+u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
#ifdef CONFIG_BCMA_HOST_PCI
/* host_pci.c */
@@ -25,4 +47,8 @@ extern int __init bcma_host_pci_init(void);
extern void __exit bcma_host_pci_exit(void);
#endif /* CONFIG_BCMA_HOST_PCI */
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+
#endif
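A small cleanup in the bcma_private.h hunk above: the extern keyword is dropped from the bcma_bus_register()/bcma_bus_unregister() prototypes. On function declarations extern is implicit, so the two forms below declare exactly the same thing; the shorter one matches the style of the declarations being added around it:

struct bcma_bus;	/* opaque forward declaration */

/* Equivalent declarations: 'extern' is implicit on functions. */
extern int bcma_bus_register(struct bcma_bus *bus);
int bcma_bus_register(struct bcma_bus *bus);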
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index ced379f..893f6e0 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -6,6 +6,7 @@
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
bool bcma_core_is_enabled(struct bcma_device *core)
@@ -19,7 +20,7 @@ bool bcma_core_is_enabled(struct bcma_device *core)
}
EXPORT_SYMBOL_GPL(bcma_core_is_enabled);
-static void bcma_core_disable(struct bcma_device *core, u32 flags)
+void bcma_core_disable(struct bcma_device *core, u32 flags)
{
if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
return;
@@ -31,6 +32,7 @@ static void bcma_core_disable(struct bcma_device *core, u32 flags)
bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
udelay(1);
}
+EXPORT_SYMBOL_GPL(bcma_core_disable);
int bcma_core_enable(struct bcma_device *core, u32 flags)
{
@@ -49,3 +51,77 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);
+
+void bcma_core_set_clockmode(struct bcma_device *core,
+ enum bcma_clkmode clkmode)
+{
+ u16 i;
+
+ WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
+ core->id.id != BCMA_CORE_PCIE &&
+ core->id.id != BCMA_CORE_80211);
+
+ switch (clkmode) {
+ case BCMA_CLKMODE_FAST:
+ bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+ udelay(64);
+ for (i = 0; i < 1500; i++) {
+ if (bcma_read32(core, BCMA_CLKCTLST) &
+ BCMA_CLKCTLST_HAVEHT) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ pr_err("HT force timeout\n");
+ break;
+ case BCMA_CLKMODE_DYNAMIC:
+ pr_warn("Dynamic clockmode not supported yet!\n");
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);
+
+void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
+{
+ u16 i;
+
+ WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
+ WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);
+
+ if (on) {
+ bcma_set32(core, BCMA_CLKCTLST, req);
+ for (i = 0; i < 10000; i++) {
+ if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
+ status) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ pr_err("PLL enable timeout\n");
+ } else {
+ pr_warn("Disabling PLL not supported yet!\n");
+ }
+}
+EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
+
+u32 bcma_core_dma_translation(struct bcma_device *core)
+{
+ switch (core->bus->hosttype) {
+ case BCMA_HOSTTYPE_SOC:
+ return 0;
+ case BCMA_HOSTTYPE_PCI:
+ if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
+ return BCMA_DMA_TRANSLATION_DMA64_CMT;
+ else
+ return BCMA_DMA_TRANSLATION_DMA32_CMT;
+ default:
+ pr_err("DMA translation unknown for host %d\n",
+ core->bus->hosttype);
+ }
+ return BCMA_DMA_TRANSLATION_NONE;
+}
+EXPORT_SYMBOL(bcma_core_dma_translation);
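
Both bcma_core_set_clockmode() and bcma_core_pll_ctl() above use the same set-then-poll idiom: write a request bit, then spin on a status bit in bounded udelay() steps, reusing the loop counter as the timeout flag (i == 0 means success). A minimal standalone sketch of that idiom follows; set_request()/read_status() are hypothetical stand-ins for bcma_set32()/bcma_read32():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register accessors standing in for bcma_set32()/bcma_read32(). */
static uint32_t fake_reg;
static void set_request(uint32_t bit) { fake_reg |= bit; }
static uint32_t read_status(void) { return fake_reg; }
static void udelay(unsigned int usec) { (void)usec; /* busy-wait elided */ }

/* Set a request bit, then poll for the status bit with a bounded wait.
 * Returns true on success, false on timeout, mirroring the "if (i)
 * pr_err(...)" convention where i == 0 signals success. */
static bool set_and_poll(uint32_t req, uint32_t status, unsigned int tries)
{
	unsigned int i;

	set_request(req);
	for (i = 0; i < tries; i++) {
		if (read_status() & status)
			return true;
		udelay(10);
	}
	return false;
}

int main(void)
{
	fake_reg = 0x2;	/* pretend the hardware already reports the status bit */
	printf("%s\n", set_and_poll(0x1, 0x2, 1500) ? "ready" : "timeout");
	return 0;
}

Folding success into "i = 0; break;" keeps the post-loop error check to a single branch, which is why the kernel loops read slightly unusually.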
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index 6061022..e9f1b3f 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -3,12 +3,13 @@
* ChipCommon core driver
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
@@ -23,6 +24,12 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
{
+ u32 leddc_on = 10;
+ u32 leddc_off = 90;
+
+ if (cc->setup_done)
+ return;
+
if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
@@ -38,6 +45,19 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
pr_err("Power control not implemented!\n");
+
+ if (cc->core->id.rev >= 16) {
+ if (cc->core->bus->sprom.leddc_on_time &&
+ cc->core->bus->sprom.leddc_off_time) {
+ leddc_on = cc->core->bus->sprom.leddc_on_time;
+ leddc_off = cc->core->bus->sprom.leddc_off_time;
+ }
+ bcma_cc_write32(cc, BCMA_CC_GPIOTIMER,
+ ((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
+ (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
+ }
+
+ cc->setup_done = true;
}
/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
@@ -87,3 +107,51 @@ u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
{
return bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
}
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+{
+ unsigned int irq;
+ u32 baud_base;
+ u32 i;
+ unsigned int ccrev = cc->core->id.rev;
+ struct bcma_serial_port *ports = cc->serial_ports;
+
+ if (ccrev >= 11 && ccrev != 15) {
+ /* Fixed ALP clock */
+ baud_base = bcma_pmu_alp_clock(cc);
+ if (ccrev >= 21) {
+ /* Turn off UART clock before switching clocksource. */
+ bcma_cc_write32(cc, BCMA_CC_CORECTL,
+ bcma_cc_read32(cc, BCMA_CC_CORECTL)
+ & ~BCMA_CC_CORECTL_UARTCLKEN);
+ }
+ /* Set the override bit so we don't divide it */
+ bcma_cc_write32(cc, BCMA_CC_CORECTL,
+ bcma_cc_read32(cc, BCMA_CC_CORECTL)
+ | BCMA_CC_CORECTL_UARTCLK0);
+ if (ccrev >= 21) {
+ /* Re-enable the UART clock. */
+ bcma_cc_write32(cc, BCMA_CC_CORECTL,
+ bcma_cc_read32(cc, BCMA_CC_CORECTL)
+ | BCMA_CC_CORECTL_UARTCLKEN);
+ }
+ } else {
+ pr_err("serial not supported on this device ccrev: 0x%x\n",
+ ccrev);
+ return;
+ }
+
+ irq = bcma_core_mips_irq(cc->core);
+
+ /* Determine the registers of the UARTs */
+ cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
+ for (i = 0; i < cc->nr_serial_ports; i++) {
+ ports[i].regs = cc->core->io_addr + BCMA_CC_UART0_DATA +
+ (i * 256);
+ ports[i].irq = irq;
+ ports[i].baud_base = baud_base;
+ ports[i].reg_shift = 0;
+ }
+}
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
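
The LED duty-cycle setup in bcma_core_chipcommon_init() packs two SPROM-or-default values into one register write. A minimal sketch of the packing; the shift constants are illustrative placeholders for BCMA_CC_GPIOTIMER_ONTIME_SHIFT/_OFFTIME_SHIFT:

#include <stdint.h>
#include <stdio.h>

/* Illustrative shift values; the real driver uses
 * BCMA_CC_GPIOTIMER_ONTIME_SHIFT / _OFFTIME_SHIFT. */
#define ONTIME_SHIFT	16
#define OFFTIME_SHIFT	8

static uint32_t pack_leddc(uint32_t on, uint32_t off)
{
	return (on << ONTIME_SHIFT) | (off << OFFTIME_SHIFT);
}

int main(void)
{
	/* Defaults from the patch: 10 on, 90 off, overridden by the SPROM
	 * leddc_on_time/leddc_off_time fields when both are non-zero. */
	printf("GPIOTIMER = 0x%08x\n", pack_leddc(10, 90));
	return 0;
}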
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index f44177a..800163c 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -2,29 +2,57 @@
* Broadcom specific AMBA
* ChipCommon Power Management Unit driver
*
- * Copyright 2009, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2009, Michael Buesch <m@bues.ch>
* Copyright 2007, Broadcom Corporation
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
-static void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
- u32 offset, u32 mask, u32 set)
+static u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
{
- u32 value;
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+}
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
+void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
+
+void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
+
+void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+ u32 offset, u32 mask, u32 set)
+{
bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
- value = bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
- value &= mask;
- value |= set;
- bcma_cc_write32(cc, BCMA_CC_CHIPCTL_DATA, value);
- bcma_cc_read32(cc, BCMA_CC_CHIPCTL_DATA);
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
+
+void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+ u32 set)
+{
+ bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
+ bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
+ bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
}
+EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
@@ -53,6 +81,7 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
max_msk = 0xFFFF;
break;
case 43224:
+ case 43225:
break;
default:
pr_err("PMU resource config unknown for device 0x%04X\n",
@@ -74,6 +103,7 @@ void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
case 0x4313:
case 0x4331:
case 43224:
+ case 43225:
break;
default:
pr_err("PMU switch/regulators init unknown for device "
@@ -81,6 +111,24 @@ void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
}
}
+/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
+{
+ struct bcma_bus *bus = cc->core->bus;
+ u32 val;
+
+ val = bcma_cc_read32(cc, BCMA_CC_CHIPCTL);
+ if (enable) {
+ val |= BCMA_CHIPCTL_4331_EXTPA_EN;
+ if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
+ val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ } else {
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ }
+ bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
+}
+
void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
@@ -90,17 +138,19 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
break;
case 0x4331:
- pr_err("Enabling Ext PA lines not implemented\n");
+ /* The BCM4331 workaround is SPROM-related; it lives in sprom.c */
break;
case 43224:
if (bus->chipinfo.rev == 0) {
pr_err("Workarounds for 43224 rev 0 not fully "
"implemented\n");
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
+ bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
} else {
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
}
break;
+ case 43225:
+ break;
default:
pr_err("Workarounds unknown for device 0x%04X\n",
bus->chipinfo.id);
@@ -132,3 +182,129 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_pmu_swreg_init(cc);
bcma_pmu_workarounds(cc);
}
+
+u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+ case 0x4716:
+ case 0x4748:
+ case 47162:
+ case 0x4313:
+ case 0x5357:
+ case 0x4749:
+ case 53572:
+ /* always 20 MHz */
+ return 20000 * 1000;
+ case 0x5356:
+ case 0x5300:
+ /* always 25Mhz */
+ return 25000 * 1000;
+ default:
+ pr_warn("No ALP clock specified for %04X device, "
+ "pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
+ }
+ return BCMA_CC_PMU_ALP_CLOCK;
+}
+
+/* Find the output of the "m" pll divider given pll controls that start with
+ * pllreg "pll0" i.e. 12 for main 6 for phy, 0 for misc.
+ */
+static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+{
+ u32 tmp, div, ndiv, p1, p2, fc;
+ struct bcma_bus *bus = cc->core->bus;
+
+ BUG_ON((pll0 & 3) || (pll0 > BCMA_CC_PMU4716_MAINPLL_PLL0));
+
+ BUG_ON(!m || m > 4);
+
+ if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
+ /* Detect failure in clock setting */
+ tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+ if (tmp & 0x40000)
+ return 133 * 1000000;
+ }
+
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_P1P2_OFF);
+ p1 = (tmp & BCMA_CC_PPL_P1_MASK) >> BCMA_CC_PPL_P1_SHIFT;
+ p2 = (tmp & BCMA_CC_PPL_P2_MASK) >> BCMA_CC_PPL_P2_SHIFT;
+
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_M14_OFF);
+ div = (tmp >> ((m - 1) * BCMA_CC_PPL_MDIV_WIDTH)) &
+ BCMA_CC_PPL_MDIV_MASK;
+
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_NM5_OFF);
+ ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;
+
+ /* Do calculation in MHz */
+ fc = bcma_pmu_alp_clock(cc) / 1000000;
+ fc = (p1 * ndiv * fc) / p2;
+
+ /* Return clock in Hertz */
+ return (fc / div) * 1000000;
+}
+
+/* query bus clock frequency for PMU-enabled chipcommon */
+u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+ case 0x4716:
+ case 0x4748:
+ case 47162:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 0x5356:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 0x5357:
+ case 0x4749:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 0x5300:
+ return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case 53572:
+ return 75000000;
+ default:
+ pr_warn("No backplane clock specified for %04X device, "
+ "pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
+ }
+ return BCMA_CC_PMU_HT_CLOCK;
+}
+
+/* query cpu clock frequency for PMU-enabled chipcommon */
+u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
+{
+ struct bcma_bus *bus = cc->core->bus;
+
+ if (bus->chipinfo.id == 53572)
+ return 300000000;
+
+ if (cc->pmu.rev >= 5) {
+ u32 pll;
+ switch (bus->chipinfo.id) {
+ case 0x5356:
+ pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
+ break;
+ case 0x5357:
+ case 0x4749:
+ pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
+ break;
+ default:
+ pll = BCMA_CC_PMU4716_MAINPLL_PLL0;
+ break;
+ }
+
+ /* TODO: if (bus->chipinfo.id == 0x5300)
+ return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
+ return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
+ }
+
+ return bcma_pmu_get_clockcontrol(cc);
+}
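
bcma_pmu_clock() reduces to one formula: with the ALP reference converted to MHz, the VCO frequency is p1 * ndiv * alp / p2, and the output is that divided by the per-channel m-divider, converted back to Hz. A worked sketch of the arithmetic; the divider values are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the bcma_pmu_clock() arithmetic: do the math in MHz,
 * return Hz. Divider values here are illustrative only. */
static uint32_t pll_clock_hz(uint32_t alp_hz, uint32_t p1, uint32_t p2,
			     uint32_t ndiv, uint32_t div)
{
	uint32_t fc = alp_hz / 1000000;	/* reference in MHz */

	fc = (p1 * ndiv * fc) / p2;	/* VCO frequency in MHz */
	return (fc / div) * 1000000;	/* back to Hz after the m-divider */
}

int main(void)
{
	/* e.g. 20 MHz ALP, p1=1, p2=2, ndiv=80, m-divider 8 -> 100 MHz */
	printf("%u Hz\n", pll_clock_hz(20000000, 1, 2, 80, 8));
	return 0;
}

Doing the intermediate steps in MHz keeps the products inside 32 bits, which is why the driver scales down first and back up last.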
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index e757e4e..4fde625 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -3,12 +3,13 @@
* PCI Core
*
* Copyright 2005, Broadcom Corporation
- * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
#include "bcma_private.h"
+#include <linux/export.h>
#include <linux/bcma/bcma.h>
/**************************************************
@@ -157,7 +158,81 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
* Init.
**************************************************/
-void bcma_core_pci_init(struct bcma_drv_pci *pc)
+static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
bcma_pcicore_serdes_workaround(pc);
}
+
+static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+ struct bcma_bus *bus = pc->core->bus;
+ u16 chipid_top;
+
+ chipid_top = (bus->chipinfo.id & 0xFF00);
+ if (chipid_top != 0x4700 &&
+ chipid_top != 0x5300)
+ return false;
+
+#ifdef CONFIG_SSB_DRIVER_PCICORE
+ if (bus->sprom.boardflags_lo & SSB_BFL_NOPCI)
+ return false;
+#endif /* CONFIG_SSB_DRIVER_PCICORE */
+
+#if 0
+ /* TODO: on BCMA we use address from EROM instead of magic formula */
+ u32 tmp;
+ return !mips_busprobe32(tmp, (bus->mmio +
+ (pc->core->core_index * BCMA_CORE_SIZE)));
+#endif
+
+ return true;
+}
+
+void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+ if (pc->setup_done)
+ return;
+
+ if (bcma_core_pci_is_in_hostmode(pc)) {
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+ bcma_core_pci_hostmode_init(pc);
+#else
+ pr_err("Driver compiled without support for hostmode PCI\n");
+#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+ } else {
+ bcma_core_pci_clientmode_init(pc);
+ }
+
+ pc->setup_done = true;
+}
+
+int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
+ bool enable)
+{
+ struct pci_dev *pdev = pc->core->bus->host_pci;
+ u32 coremask, tmp;
+ int err = 0;
+
+ if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+ /* This bcma device is not on a PCI host-bus, so the IRQs are
+ * not routed through the PCI core and we must not enable
+ * routing through it. */
+ goto out;
+ }
+
+ err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+ if (err)
+ goto out;
+
+ coremask = BIT(core->core_index) << 8;
+ if (enable)
+ tmp |= coremask;
+ else
+ tmp &= ~coremask;
+
+ err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
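
bcma_core_pci_irq_ctl() is a plain read-modify-write of one config dword, with each core's routing bit at BIT(core_index) << 8. A minimal sketch of the mask handling; a plain variable stands in for the BCMA_PCI_IRQMASK config-space access:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Stand-in for the BCMA_PCI_IRQMASK config dword. */
static uint32_t irqmask;

static void irq_ctl(unsigned int core_index, bool enable)
{
	uint32_t coremask = BIT(core_index) << 8;

	if (enable)
		irqmask |= coremask;
	else
		irqmask &= ~coremask;
}

int main(void)
{
	irq_ctl(2, true);			/* route IRQs for core 2 */
	printf("mask = 0x%08x\n", irqmask);	/* 0x00000400 */
	irq_ctl(2, false);
	printf("mask = 0x%08x\n", irqmask);	/* 0x00000000 */
	return 0;
}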
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 471a040..48e06be 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
#include <linux/pci.h>
+#include <linux/module.h>
static void bcma_host_pci_switch_core(struct bcma_device *core)
{
@@ -65,6 +66,54 @@ static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
iowrite32(value, core->bus->mmio + offset);
}
+#ifdef CONFIG_BCMA_BLOCKIO
+void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ void __iomem *addr = core->bus->mmio + offset;
+ if (core->bus->mapped_core != core)
+ bcma_host_pci_switch_core(core);
+ switch (reg_width) {
+ case sizeof(u8):
+ ioread8_rep(addr, buffer, count);
+ break;
+ case sizeof(u16):
+ WARN_ON(count & 1);
+ ioread16_rep(addr, buffer, count >> 1);
+ break;
+ case sizeof(u32):
+ WARN_ON(count & 3);
+ ioread32_rep(addr, buffer, count >> 2);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
+ size_t count, u16 offset, u8 reg_width)
+{
+ void __iomem *addr = core->bus->mmio + offset;
+ if (core->bus->mapped_core != core)
+ bcma_host_pci_switch_core(core);
+ switch (reg_width) {
+ case sizeof(u8):
+ iowrite8_rep(addr, buffer, count);
+ break;
+ case sizeof(u16):
+ WARN_ON(count & 1);
+ iowrite16_rep(addr, buffer, count >> 1);
+ break;
+ case sizeof(u32):
+ WARN_ON(count & 3);
+ iowrite32_rep(addr, buffer, count >> 2);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+#endif
+
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
if (core->bus->mapped_core != core)
@@ -87,6 +136,10 @@ const struct bcma_host_ops bcma_host_pci_ops = {
.write8 = bcma_host_pci_write8,
.write16 = bcma_host_pci_write16,
.write32 = bcma_host_pci_write32,
+#ifdef CONFIG_BCMA_BLOCKIO
+ .block_read = bcma_host_pci_block_read,
+ .block_write = bcma_host_pci_block_write,
+#endif
.aread32 = bcma_host_pci_aread32,
.awrite32 = bcma_host_pci_awrite32,
};
@@ -171,10 +224,49 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ struct bcma_bus *bus = pci_get_drvdata(dev);
+
+ /* Host specific */
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ bus->mapped_core = NULL;
+ return 0;
+}
+
+static int bcma_host_pci_resume(struct pci_dev *dev)
+{
+ struct bcma_bus *bus = pci_get_drvdata(dev);
+ int err;
+
+ /* Host specific */
+ pci_set_power_state(dev, 0);
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+ pci_restore_state(dev);
+
+ /* Bus specific */
+ err = bcma_bus_resume(bus);
+ if (err)
+ return err;
+
+ return 0;
+}
+#else /* CONFIG_PM */
+# define bcma_host_pci_suspend NULL
+# define bcma_host_pci_resume NULL
+#endif /* CONFIG_PM */
+
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
{ 0, },
};
@@ -185,6 +277,8 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
+ .suspend = bcma_host_pci_suspend,
+ .resume = bcma_host_pci_resume,
};
int __init bcma_host_pci_init(void)
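
The new block I/O ops dispatch on register width: byte counts must be even for 16-bit and a multiple of four for 32-bit transfers (hence the WARN_ONs), and the count is shifted down from bytes to elements before the rep-I/O call. A standalone sketch of that dispatch; memcpy stands in for ioread*_rep():

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for ioread8_rep/ioread16_rep/ioread32_rep: copy 'elems'
 * elements of 'width' bytes from a fake MMIO window. */
static void read_rep(const void *mmio, void *buf, size_t elems, size_t width)
{
	memcpy(buf, mmio, elems * width);
}

static int block_read(const void *mmio, void *buf, size_t count, uint8_t width)
{
	switch (width) {
	case sizeof(uint8_t):
		read_rep(mmio, buf, count, 1);
		break;
	case sizeof(uint16_t):
		assert(!(count & 1));		/* WARN_ON(count & 1) */
		read_rep(mmio, buf, count >> 1, 2);
		break;
	case sizeof(uint32_t):
		assert(!(count & 3));		/* WARN_ON(count & 3) */
		read_rep(mmio, buf, count >> 2, 4);
		break;
	default:
		return -1;			/* WARN_ON(1) */
	}
	return 0;
}

int main(void)
{
	uint8_t window[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint32_t out[2];

	block_read(window, out, sizeof(out), sizeof(uint32_t));
	printf("0x%08x 0x%08x\n", out[0], out[1]);
	return 0;
}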
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index a9cb238..7a987a7 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -6,7 +6,9 @@
*/
#include "bcma_private.h"
+#include <linux/module.h>
#include <linux/bcma/bcma.h>
+#include <linux/slab.h>
MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
@@ -14,6 +16,7 @@ MODULE_LICENSE("GPL");
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static int bcma_device_remove(struct device *dev);
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -48,6 +51,7 @@ static struct bus_type bcma_bus_type = {
.match = bcma_bus_match,
.probe = bcma_device_probe,
.remove = bcma_device_remove,
+ .uevent = bcma_device_uevent,
.dev_attrs = bcma_device_attrs,
};
@@ -65,6 +69,10 @@ static struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
static void bcma_release_core_dev(struct device *dev)
{
struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+ if (core->io_addr)
+ iounmap(core->io_addr);
+ if (core->io_wrap)
+ iounmap(core->io_wrap);
kfree(core);
}
@@ -79,6 +87,7 @@ static int bcma_register_cores(struct bcma_bus *bus)
case BCMA_CORE_CHIPCOMMON:
case BCMA_CORE_PCI:
case BCMA_CORE_PCIE:
+ case BCMA_CORE_MIPS_74K:
continue;
}
@@ -89,8 +98,13 @@ static int bcma_register_cores(struct bcma_bus *bus)
switch (bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
core->dev.parent = &bus->host_pci->dev;
+ core->dma_dev = &bus->host_pci->dev;
+ core->irq = bus->host_pci->irq;
+ break;
+ case BCMA_HOSTTYPE_SOC:
+ core->dev.dma_mask = &core->dev.coherent_dma_mask;
+ core->dma_dev = &core->dev;
break;
- case BCMA_HOSTTYPE_NONE:
case BCMA_HOSTTYPE_SDIO:
break;
}
@@ -138,6 +152,13 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_chipcommon_init(&bus->drv_cc);
}
+ /* Init MIPS core */
+ core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+ if (core) {
+ bus->drv_mips.core = core;
+ bcma_core_mips_init(&bus->drv_mips);
+ }
+
/* Init PCIE core */
core = bcma_find_core(bus, BCMA_CORE_PCIE);
if (core) {
@@ -145,6 +166,15 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_init(&bus->drv_pci);
}
+ /* Try to get SPROM */
+ err = bcma_sprom_get(bus);
+ if (err == -ENOENT) {
+ pr_err("No SPROM available\n");
+ } else if (err) {
+ pr_err("Failed to get SPROM: %d\n", err);
+ return -ENOENT;
+ }
+
/* Register found cores */
bcma_register_cores(bus);
@@ -152,13 +182,80 @@ int bcma_bus_register(struct bcma_bus *bus)
return 0;
}
-EXPORT_SYMBOL_GPL(bcma_bus_register);
void bcma_bus_unregister(struct bcma_bus *bus)
{
bcma_unregister_cores(bus);
}
-EXPORT_SYMBOL_GPL(bcma_bus_unregister);
+
+int __init bcma_bus_early_register(struct bcma_bus *bus,
+ struct bcma_device *core_cc,
+ struct bcma_device *core_mips)
+{
+ int err;
+ struct bcma_device *core;
+ struct bcma_device_id match;
+
+ bcma_init_bus(bus);
+
+ match.manuf = BCMA_MANUF_BCM;
+ match.id = BCMA_CORE_CHIPCOMMON;
+ match.class = BCMA_CL_SIM;
+ match.rev = BCMA_ANY_REV;
+
+ /* Scan for chip common core */
+ err = bcma_bus_scan_early(bus, &match, core_cc);
+ if (err) {
+ pr_err("Failed to scan for common core: %d\n", err);
+ return -1;
+ }
+
+ match.manuf = BCMA_MANUF_MIPS;
+ match.id = BCMA_CORE_MIPS_74K;
+ match.class = BCMA_CL_SIM;
+ match.rev = BCMA_ANY_REV;
+
+ /* Scan for mips core */
+ err = bcma_bus_scan_early(bus, &match, core_mips);
+ if (err) {
+ pr_err("Failed to scan for mips core: %d\n", err);
+ return -1;
+ }
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.core = core;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ /* Init MIPS core */
+ core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+ if (core) {
+ bus->drv_mips.core = core;
+ bcma_core_mips_init(&bus->drv_mips);
+ }
+
+ pr_info("Early bus registered\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.setup_done = false;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ return 0;
+}
+#endif
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
@@ -218,6 +315,16 @@ static int bcma_device_remove(struct device *dev)
return 0;
}
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+
+ return add_uevent_var(env,
+ "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+ core->id.manuf, core->id.id,
+ core->id.rev, core->id.class);
+}
+
static int __init bcma_modinit(void)
{
int err;
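
The uevent hook gives udev a MODALIAS string so core drivers can be autoloaded by identity; manufacturer, id, revision and class are packed into fixed-width hex fields. A minimal sketch of the formatting; snprintf stands in for add_uevent_var() and the id values are illustrative:

#include <stdint.h>
#include <stdio.h>

struct bcma_device_id {
	uint16_t manuf;
	uint16_t id;
	uint8_t rev;
	uint8_t class;
};

/* Same format string as the uevent hook, with snprintf standing in
 * for add_uevent_var(). */
static void print_modalias(const struct bcma_device_id *id)
{
	char buf[64];

	snprintf(buf, sizeof(buf), "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
		 id->manuf, id->id, id->rev, id->class);
	puts(buf);
}

int main(void)
{
	struct bcma_device_id id = { 0x4BF, 0x812, 0x17, 0x0 };

	print_modalias(&id);	/* MODALIAS=bcma:m04BFid0812rev17cl00 */
	return 0;
}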
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 40d7dcc..cad9948 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -200,18 +200,162 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 **eromptr,
return addrl;
}
-int bcma_bus_scan(struct bcma_bus *bus)
+static struct bcma_device *bcma_find_core_by_index(struct bcma_bus *bus,
+ u16 index)
{
- u32 erombase;
- u32 __iomem *eromptr, *eromend;
+ struct bcma_device *core;
+ list_for_each_entry(core, &bus->cores, list) {
+ if (core->core_index == index)
+ return core;
+ }
+ return NULL;
+}
+
+static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
+ struct bcma_device_id *match, int core_num,
+ struct bcma_device *core)
+{
+ s32 tmp;
+ u8 i, j;
s32 cia, cib;
u8 ports[2], wrappers[2];
+ /* get CIs */
+ cia = bcma_erom_get_ci(bus, eromptr);
+ if (cia < 0) {
+ bcma_erom_push_ent(eromptr);
+ if (bcma_erom_is_end(bus, eromptr))
+ return -ESPIPE;
+ return -EILSEQ;
+ }
+ cib = bcma_erom_get_ci(bus, eromptr);
+ if (cib < 0)
+ return -EILSEQ;
+
+ /* parse CIs */
+ core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
+ core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
+ core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
+ ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
+ ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
+ wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
+ wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
+ core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;
+
+ if (((core->id.manuf == BCMA_MANUF_ARM) &&
+ (core->id.id == 0xFFF)) ||
+ (ports[1] == 0)) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
+
+ /* check if component is a core at all */
+ if (wrappers[0] + wrappers[1] == 0) {
+ /* we could save addrl of the router
+ if (cid == BCMA_CORE_OOB_ROUTER)
+ */
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
+
+ if (bcma_erom_is_bridge(bus, eromptr)) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
+
+ if (bcma_find_core_by_index(bus, core_num)) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENODEV;
+ }
+
+ if (match && ((match->manuf != BCMA_ANY_MANUF &&
+ match->manuf != core->id.manuf) ||
+ (match->id != BCMA_ANY_ID && match->id != core->id.id) ||
+ (match->rev != BCMA_ANY_REV && match->rev != core->id.rev) ||
+ (match->class != BCMA_ANY_CLASS && match->class != core->id.class)
+ )) {
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENODEV;
+ }
+
+ /* get & parse master ports */
+ for (i = 0; i < ports[0]; i++) {
+ s32 mst_port_d = bcma_erom_get_mst_port(bus, eromptr);
+ if (mst_port_d < 0)
+ return -EILSEQ;
+ }
+
+ /* get & parse slave ports */
+ for (i = 0; i < ports[1]; i++) {
+ for (j = 0; ; j++) {
+ tmp = bcma_erom_get_addr_desc(bus, eromptr,
+ SCAN_ADDR_TYPE_SLAVE, i);
+ if (tmp < 0) {
+ /* no more entries for port _i_ */
+ /* pr_debug("erom: slave port %d "
+ * "has %d descriptors\n", i, j); */
+ break;
+ } else {
+ if (i == 0 && j == 0)
+ core->addr = tmp;
+ }
+ }
+ }
+
+ /* get & parse master wrappers */
+ for (i = 0; i < wrappers[0]; i++) {
+ for (j = 0; ; j++) {
+ tmp = bcma_erom_get_addr_desc(bus, eromptr,
+ SCAN_ADDR_TYPE_MWRAP, i);
+ if (tmp < 0) {
+ /* no more entries for port _i_ */
+ /* pr_debug("erom: master wrapper %d "
+ * "has %d descriptors\n", i, j); */
+ break;
+ } else {
+ if (i == 0 && j == 0)
+ core->wrap = tmp;
+ }
+ }
+ }
+
+ /* get & parse slave wrappers */
+ for (i = 0; i < wrappers[1]; i++) {
+ u8 hack = (ports[1] == 1) ? 0 : 1;
+ for (j = 0; ; j++) {
+ tmp = bcma_erom_get_addr_desc(bus, eromptr,
+ SCAN_ADDR_TYPE_SWRAP, i + hack);
+ if (tmp < 0) {
+ /* no more entries for port _i_ */
+ /* pr_debug("erom: master wrapper %d "
+ * has %d descriptors\n", i, j); */
+ break;
+ } else {
+ if (wrappers[0] == 0 && !i && !j)
+ core->wrap = tmp;
+ }
+ }
+ }
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+ if (!core->io_addr)
+ return -ENOMEM;
+ core->io_wrap = ioremap_nocache(core->wrap, BCMA_CORE_SIZE);
+ if (!core->io_wrap) {
+ iounmap(core->io_addr);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+void bcma_init_bus(struct bcma_bus *bus)
+{
s32 tmp;
- u8 i, j;
- int err;
+ if (bus->init_done)
+ return;
INIT_LIST_HEAD(&bus->cores);
bus->nr_cores = 0;
@@ -222,9 +366,27 @@ int bcma_bus_scan(struct bcma_bus *bus)
bus->chipinfo.id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
bus->chipinfo.rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
bus->chipinfo.pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
+ bus->init_done = true;
+}
+
+int bcma_bus_scan(struct bcma_bus *bus)
+{
+ u32 erombase;
+ u32 __iomem *eromptr, *eromend;
+
+ int err, core_num = 0;
+
+ bcma_init_bus(bus);
erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
- eromptr = bus->mmio;
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ if (!eromptr)
+ return -ENOMEM;
+ } else {
+ eromptr = bus->mmio;
+ }
+
eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
bcma_scan_switch_core(bus, erombase);
@@ -236,125 +398,89 @@ int bcma_bus_scan(struct bcma_bus *bus)
INIT_LIST_HEAD(&core->list);
core->bus = bus;
- /* get CIs */
- cia = bcma_erom_get_ci(bus, &eromptr);
- if (cia < 0) {
- bcma_erom_push_ent(&eromptr);
- if (bcma_erom_is_end(bus, &eromptr))
- break;
- err= -EILSEQ;
- goto out;
- }
- cib = bcma_erom_get_ci(bus, &eromptr);
- if (cib < 0) {
- err= -EILSEQ;
- goto out;
- }
-
- /* parse CIs */
- core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
- core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
- core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
- ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
- ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
- wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
- wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
- core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;
-
- if (((core->id.manuf == BCMA_MANUF_ARM) &&
- (core->id.id == 0xFFF)) ||
- (ports[1] == 0)) {
- bcma_erom_skip_component(bus, &eromptr);
+ err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
+ if (err == -ENODEV) {
+ core_num++;
continue;
- }
-
- /* check if component is a core at all */
- if (wrappers[0] + wrappers[1] == 0) {
- /* we could save addrl of the router
- if (cid == BCMA_CORE_OOB_ROUTER)
- */
- bcma_erom_skip_component(bus, &eromptr);
+ } else if (err == -ENXIO)
continue;
- }
+ else if (err == -ESPIPE)
+ break;
+ else if (err < 0)
+ return err;
- if (bcma_erom_is_bridge(bus, &eromptr)) {
- bcma_erom_skip_component(bus, &eromptr);
- continue;
- }
+ core->core_index = core_num++;
+ bus->nr_cores++;
- /* get & parse master ports */
- for (i = 0; i < ports[0]; i++) {
- u32 mst_port_d = bcma_erom_get_mst_port(bus, &eromptr);
- if (mst_port_d < 0) {
- err= -EILSEQ;
- goto out;
- }
- }
+ pr_info("Core %d found: %s "
+ "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+ core->core_index, bcma_device_name(&core->id),
+ core->id.manuf, core->id.id, core->id.rev,
+ core->id.class);
- /* get & parse slave ports */
- for (i = 0; i < ports[1]; i++) {
- for (j = 0; ; j++) {
- tmp = bcma_erom_get_addr_desc(bus, &eromptr,
- SCAN_ADDR_TYPE_SLAVE, i);
- if (tmp < 0) {
- /* no more entries for port _i_ */
- /* pr_debug("erom: slave port %d "
- * "has %d descriptors\n", i, j); */
- break;
- } else {
- if (i == 0 && j == 0)
- core->addr = tmp;
- }
- }
- }
+ list_add(&core->list, &bus->cores);
+ }
- /* get & parse master wrappers */
- for (i = 0; i < wrappers[0]; i++) {
- for (j = 0; ; j++) {
- tmp = bcma_erom_get_addr_desc(bus, &eromptr,
- SCAN_ADDR_TYPE_MWRAP, i);
- if (tmp < 0) {
- /* no more entries for port _i_ */
- /* pr_debug("erom: master wrapper %d "
- * "has %d descriptors\n", i, j); */
- break;
- } else {
- if (i == 0 && j == 0)
- core->wrap = tmp;
- }
- }
- }
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ iounmap(eromptr);
- /* get & parse slave wrappers */
- for (i = 0; i < wrappers[1]; i++) {
- u8 hack = (ports[1] == 1) ? 0 : 1;
- for (j = 0; ; j++) {
- tmp = bcma_erom_get_addr_desc(bus, &eromptr,
- SCAN_ADDR_TYPE_SWRAP, i + hack);
- if (tmp < 0) {
- /* no more entries for port _i_ */
- /* pr_debug("erom: master wrapper %d "
- * has %d descriptors\n", i, j); */
- break;
- } else {
- if (wrappers[0] == 0 && !i && !j)
- core->wrap = tmp;
- }
- }
- }
+ return 0;
+}
+
+int __init bcma_bus_scan_early(struct bcma_bus *bus,
+ struct bcma_device_id *match,
+ struct bcma_device *core)
+{
+ u32 erombase;
+ u32 __iomem *eromptr, *eromend;
+
+ int err = -ENODEV;
+ int core_num = 0;
+
+ erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+ eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+ if (!eromptr)
+ return -ENOMEM;
+ } else {
+ eromptr = bus->mmio;
+ }
+
+ eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
+
+ bcma_scan_switch_core(bus, erombase);
+
+ while (eromptr < eromend) {
+ memset(core, 0, sizeof(*core));
+ INIT_LIST_HEAD(&core->list);
+ core->bus = bus;
+ err = bcma_get_next_core(bus, &eromptr, match, core_num, core);
+ if (err == -ENODEV) {
+ core_num++;
+ continue;
+ } else if (err == -ENXIO)
+ continue;
+ else if (err == -ESPIPE)
+ break;
+ else if (err < 0)
+ return err;
+
+ core->core_index = core_num++;
+ bus->nr_cores++;
pr_info("Core %d found: %s "
"(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- bus->nr_cores, bcma_device_name(&core->id),
+ core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
- core->core_index = bus->nr_cores++;
list_add(&core->list, &bus->cores);
- continue;
-out:
- return err;
+ err = 0;
+ break;
}
- return 0;
+ if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+ iounmap(eromptr);
+
+ return err;
}
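
The rewritten scan loop turns bcma_get_next_core()'s return value into control flow: -ENODEV skips a matched-or-duplicate component but still consumes a core index, -ENXIO skips a non-core without consuming one, -ESPIPE ends the EROM walk, and any other error aborts. A standalone sketch of that dispatch; the errno values and the scripted core source are stand-ins:

#include <stdio.h>

/* Illustrative stand-ins for the kernel errno values used by the scan. */
#define ENODEV	19	/* matched/duplicate component: skip, keep index */
#define ENXIO	6	/* not a core at all: skip, no index */
#define ESPIPE	29	/* end of the EROM table */

/* Fake "next core" source: a scripted sequence of results. */
static const int script[] = { 0, -ENXIO, 0, -ENODEV, -ESPIPE };

static int get_next_core(unsigned int step)
{
	return script[step];
}

int main(void)
{
	int core_num = 0, nr_cores = 0;
	unsigned int step;

	for (step = 0; step < sizeof(script) / sizeof(script[0]); step++) {
		int err = get_next_core(step);

		if (err == -ENODEV) {		/* consumed an index, no device */
			core_num++;
			continue;
		} else if (err == -ENXIO) {	/* not a core: no index */
			continue;
		} else if (err == -ESPIPE) {	/* end of EROM */
			break;
		} else if (err < 0) {
			return 1;
		}
		core_num++;			/* registered core */
		nr_cores++;
	}
	printf("%d cores, %d indices used\n", nr_cores, core_num);
	return 0;
}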
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index fd484a9..1331740 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1929,11 +1929,17 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
goto out;
s->manufact.len = buf[0] << 8 | buf[1];
- if (s->manufact.len < 0 || s->manufact.len > 2048) {
+ if (s->manufact.len < 0) {
cdinfo(CD_WARNING, "Received invalid manufacture info length"
" (%d)\n", s->manufact.len);
ret = -EIO;
} else {
+ if (s->manufact.len > 2048) {
+ cdinfo(CD_WARNING, "Received invalid manufacture info "
+ "length (%d): truncating to 2048\n",
+ s->manufact.len);
+ s->manufact.len = 2048;
+ }
memcpy(s->manufact.value, &buf[4], s->manufact.len);
}
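
The dvd_read_manufact() change swaps "reject overlong" for "warn and truncate": lengths above the 2048-byte buffer are clamped before the memcpy instead of failing the whole ioctl. A minimal sketch of the clamp:

#include <stdio.h>
#include <string.h>

#define MANUFACT_MAX 2048

/* Clamp-and-copy pattern from dvd_read_manufact(): negative lengths
 * are rejected, oversized ones are truncated with a warning. */
static int copy_manufact(char *dst, const char *src, int len)
{
	if (len < 0)
		return -1;			/* -EIO in the driver */
	if (len > MANUFACT_MAX) {
		fprintf(stderr, "truncating %d to %d\n", len, MANUFACT_MAX);
		len = MANUFACT_MAX;
	}
	memcpy(dst, src, len);
	return len;
}

int main(void)
{
	static char src[4096], dst[MANUFACT_MAX];

	printf("copied %d bytes\n", copy_manufact(dst, src, 3000));
	return 0;
}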
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index f27d0d0..4b71647 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -171,7 +171,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
}
got_gatt = 1;
- bridge->key_list = vmalloc(PAGE_SIZE * 4);
+ bridge->key_list = vzalloc(PAGE_SIZE * 4);
if (bridge->key_list == NULL) {
dev_err(&bridge->dev->dev,
"can't allocate memory for key lists\n");
@@ -181,7 +181,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
got_keylist = 1;
/* FIXME vmalloc'd memory not guaranteed contiguous */
- memset(bridge->key_list, 0, PAGE_SIZE * 4);
if (bridge->driver->configure()) {
dev_err(&bridge->dev->dev, "error configuring host chipset\n");
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 056b289..3695773 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -336,7 +336,8 @@ hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
off_t j, io_pg_start;
int io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
@@ -380,7 +381,8 @@ hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
struct _hp_private *hp = &hp_private;
int i, io_pg_start, io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6f24604..439d7e7 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -235,6 +235,7 @@
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
+#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
int intel_gmch_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge);
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 8515101..59d4697 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -30,10 +30,10 @@
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
- * on the Intel IOMMU support (CONFIG_DMAR).
+ * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
* Only newer chipsets need to bother with this, of course.
*/
-#ifdef CONFIG_DMAR
+#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
@@ -923,6 +923,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries;
@@ -985,6 +988,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
+ if (intel_private.base.do_idle_maps)
+ return -ENODEV;
+
intel_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.base.needs_dmar) {
@@ -1177,6 +1183,26 @@ static void gen6_cleanup(void)
{
}
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+static inline int needs_idle_maps(void)
+{
+#ifdef CONFIG_INTEL_IOMMU
+ const unsigned short gpu_devid = intel_private.pcidev->device;
+ extern int intel_iommu_gfx_mapped;
+
+ /* Query intel_iommu to see if we need the workaround. Presumably that
+ * was loaded first.
+ */
+ if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ intel_iommu_gfx_mapped)
+ return 1;
+#endif
+ return 0;
+}
+
static int i9xx_setup(void)
{
u32 reg_addr;
@@ -1211,6 +1237,9 @@ static int i9xx_setup(void)
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
+ if (needs_idle_maps())
+ intel_private.base.do_idle_maps = 1;
+
intel_i9xx_setup_flush();
return 0;
@@ -1430,6 +1459,8 @@ static const struct intel_gtt_driver_description {
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
+ { PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
+ "Ivybridge", &sandybridge_gtt_driver },
{ 0, NULL, NULL }
};
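
needs_idle_maps() gates the Ironlake workaround on two conditions evaluated once at setup: the GPU is one of the two affected device ids and intel_iommu reports graphics mappings; the insert/remove entry points then bail with -ENODEV while the flag is set. A sketch of the gating; the device ids and the exported flag are treated as illustrative here:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative device ids standing in for the Ironlake D/M IG ids. */
#define IRONLAKE_D_IG	0x0042
#define IRONLAKE_M_IG	0x0046

static int intel_iommu_gfx_mapped = 1;	/* stand-in for the IOMMU export */

static bool needs_idle_maps(unsigned short gpu_devid)
{
	return (gpu_devid == IRONLAKE_D_IG || gpu_devid == IRONLAKE_M_IG) &&
	       intel_iommu_gfx_mapped;
}

int main(void)
{
	printf("0x%04x: %s\n", IRONLAKE_M_IG,
	       needs_idle_maps(IRONLAKE_M_IG) ? "idle maps" : "normal");
	return 0;
}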
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index a60043b..0689bf6 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -60,6 +60,19 @@ config HW_RANDOM_AMD
If unsure, say Y.
+config HW_RANDOM_ATMEL
+ tristate "Atmel Random Number Generator support"
+ depends on HW_RANDOM && ARCH_AT91SAM9G45
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on Atmel AT91 devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called atmel-rng.
+
+ If unsure, say Y.
+
config HW_RANDOM_GEODE
tristate "AMD Geode HW Random Number Generator support"
depends on HW_RANDOM && X86_32 && PCI
@@ -210,3 +223,30 @@ config HW_RANDOM_PICOXCELL
module will be called picoxcell-rng.
If unsure, say Y.
+
+config HW_RANDOM_PPC4XX
+ tristate "PowerPC 4xx generic true random number generator support"
+ depends on HW_RANDOM && PPC && 4xx
+ ---help---
+ This driver provides the kernel-side support for the TRNG hardware
+ found in the security function of some PowerPC 4xx SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ppc4xx-rng.
+
+ If unsure, say N.
+
+config UML_RANDOM
+ depends on UML
+ tristate "Hardware random number generator"
+ help
+ This option enables UML's "hardware" random number generator. It
+ attaches itself to the host's /dev/random, supplying as much entropy
+ as the host has, rather than the small amount the UML gets from its
+ own drivers. It registers itself as a standard hardware random number
+ generator, major 10, minor 183, and the canonical device name is
+ /dev/hwrng.
+ The way to make use of this is to install the rng-tools package
+ (check your distro, or download from
+ http://sourceforge.net/projects/gkernel/). rngd periodically reads
+ /dev/hwrng and injects the entropy into /dev/random.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3db4eb8..b2ff526 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -7,6 +7,7 @@ rng-core-y := core.o
obj-$(CONFIG_HW_RANDOM_TIMERIOMEM) += timeriomem-rng.o
obj-$(CONFIG_HW_RANDOM_INTEL) += intel-rng.o
obj-$(CONFIG_HW_RANDOM_AMD) += amd-rng.o
+obj-$(CONFIG_HW_RANDOM_ATMEL) += atmel-rng.o
obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
n2-rng-y := n2-drv.o n2-asm.o
@@ -20,3 +21,4 @@ obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PICOXCELL) += picoxcell-rng.o
+obj-$(CONFIG_HW_RANDOM_PPC4XX) += ppc4xx-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 564f6c4..69ae597 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -19,7 +19,7 @@
Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
Added generic RNG API
- Copyright 2006 Michael Buesch <mbuesch@freenet.de>
+ Copyright 2006 Michael Buesch <m@bues.ch>
Copyright 2005 (c) MontaVista Software, Inc.
Please read Documentation/hw_random.txt for details on use.
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index ac6739e..c3de70d 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -1,6 +1,6 @@
/* n2-drv.c: Niagara-2 RNG driver.
*
- * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2008, 2011 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
@@ -22,8 +22,8 @@
#define DRV_MODULE_NAME "n2rng"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.1"
-#define DRV_MODULE_RELDATE "May 15, 2008"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "July 27, 2011"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -623,14 +623,14 @@ static const struct of_device_id n2rng_match[];
static int __devinit n2rng_probe(struct platform_device *op)
{
const struct of_device_id *match;
- int victoria_falls;
+ int multi_capable;
int err = -ENOMEM;
struct n2rng *np;
match = of_match_device(n2rng_match, &op->dev);
if (!match)
return -EINVAL;
- victoria_falls = (match->data != NULL);
+ multi_capable = (match->data != NULL);
n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);
@@ -640,8 +640,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
INIT_DELAYED_WORK(&np->work, n2rng_work);
- if (victoria_falls)
- np->flags |= N2RNG_FLAG_VF;
+ if (multi_capable)
+ np->flags |= N2RNG_FLAG_MULTI;
err = -ENODEV;
np->hvapi_major = 2;
@@ -658,10 +658,10 @@ static int __devinit n2rng_probe(struct platform_device *op)
}
}
- if (np->flags & N2RNG_FLAG_VF) {
+ if (np->flags & N2RNG_FLAG_MULTI) {
if (np->hvapi_major < 2) {
- dev_err(&op->dev, "VF RNG requires HVAPI major "
- "version 2 or later, got %lu\n",
+ dev_err(&op->dev, "multi-unit-capable RNG requires "
+ "HVAPI major version 2 or later, got %lu\n",
np->hvapi_major);
goto out_hvapi_unregister;
}
@@ -688,8 +688,8 @@ static int __devinit n2rng_probe(struct platform_device *op)
goto out_free_units;
dev_info(&op->dev, "Found %s RNG, units: %d\n",
- ((np->flags & N2RNG_FLAG_VF) ?
- "Victoria Falls" : "Niagara2"),
+ ((np->flags & N2RNG_FLAG_MULTI) ?
+ "multi-unit-capable" : "single-unit"),
np->num_units);
np->hwrng.name = "n2rng";
@@ -751,6 +751,11 @@ static const struct of_device_id n2rng_match[] = {
.compatible = "SUNW,vf-rng",
.data = (void *) 1,
},
+ {
+ .name = "random-number-generator",
+ .compatible = "SUNW,kt-rng",
+ .data = (void *) 1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, n2rng_match);
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 4bea07f..f244ac8 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -68,7 +68,7 @@ struct n2rng {
struct platform_device *op;
unsigned long flags;
-#define N2RNG_FLAG_VF 0x00000001 /* Victoria Falls RNG, else N2 */
+#define N2RNG_FLAG_MULTI 0x00000001 /* Multi-unit capable RNG */
#define N2RNG_FLAG_CONTROL 0x00000002 /* Operating in control domain */
#define N2RNG_FLAG_READY 0x00000008 /* Ready for hw-rng layer */
#define N2RNG_FLAG_SHUTDOWN 0x00000010 /* Driver unregistering */
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index dd1d143..52e08ca 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -55,7 +55,7 @@ static int nmk_rng_probe(struct amba_device *dev, const struct amba_id *id)
ret = amba_request_regions(dev, dev->dev.init_name);
if (ret)
- return ret;
+ goto out_clk;
ret = -ENOMEM;
base = ioremap(dev->res.start, resource_size(&dev->res));
if (!base)
@@ -70,6 +70,7 @@ out_unmap:
iounmap(base);
out_release:
amba_release_regions(dev);
+out_clk:
clk_disable(rng_clk);
clk_put(rng_clk);
return ret;
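
The nomadik-rng fix is purely about unwind ordering: the early amba_request_regions() failure used to return without disabling the clock, so the new out_clk label gives every exit path a release in exact reverse order of acquisition. A minimal sketch of the goto-unwind idiom with faked resources:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
	if (fail) {
		printf("%s: failed\n", what);
		return -1;
	}
	printf("%s: ok\n", what);
	return 0;
}

static void release(const char *what)
{
	printf("undo %s\n", what);
}

/* goto-based unwind: each failure jumps to the label that releases
 * everything acquired so far, in reverse order. */
static int probe(int fail_regions)
{
	int ret;

	ret = acquire("clk", 0);
	if (ret)
		return ret;
	ret = acquire("regions", fail_regions);
	if (ret)
		goto out_clk;		/* the fix: don't leak the clock */
	ret = acquire("ioremap", 0);
	if (ret)
		goto out_regions;
	return 0;

out_regions:
	release("regions");
out_clk:
	release("clk");
	return ret;
}

int main(void)
{
	probe(1);	/* exercise the failure path */
	return 0;
}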
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 2cc755a..b757fac 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -113,8 +113,10 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
+ if (!res) {
+ ret = -ENOENT;
+ goto err_region;
+ }
if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
ret = -EBUSY;
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index a94e930..a8428e6 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -100,8 +100,7 @@ static int __devinit timeriomem_rng_probe(struct platform_device *pdev)
timeriomem_rng_data = pdev->dev.platform_data;
- timeriomem_rng_data->address = ioremap(res->start,
- res->end - res->start + 1);
+ timeriomem_rng_data->address = ioremap(res->start, resource_size(res));
if (!timeriomem_rng_data->address)
return -EIO;
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index ca7570d..86ed591 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_rng.h>
+#include <linux/module.h>
static struct virtqueue *vq;
static unsigned int data_avail;
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 92ce302..263f899 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
static inline int read_all_bytes(struct si_sm_data *bt)
{
- unsigned char i;
+ unsigned int i;
/*
* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 320668f..34767a6 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -52,7 +52,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_X86
/*
@@ -65,6 +65,7 @@
* mechanism for it at that time.
*/
#include <asm/kdebug.h>
+#include <asm/nmi.h>
#define HAVE_DIE_NMI
#endif
@@ -138,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
+
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -595,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -615,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -652,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+ printk(KERN_ERR PFX ": Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+ * The timer was not initialized, which means the BMC was
+ * probably reset and lost the watchdog information. Attempt
+ * to restore the timer's settings. Note that we still hold
+ * the heartbeat lock, to keep a heartbeat from happening
+ * in this process, so we must pass "no heartbeat" here to
+ * avoid deadlocking on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+ printk(KERN_ERR PFX ": Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -661,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@@ -921,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
@@ -1077,17 +1113,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
#ifdef HAVE_DIE_NMI
static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
{
- struct die_args *args = data;
-
- if (val != DIE_NMIUNKNOWN)
- return NOTIFY_OK;
-
- /* Hack, if it's a memory or I/O error, ignore it. */
- if (args->err & 0xc0)
- return NOTIFY_OK;
-
/*
* If we get here, it's an NMI that's not a memory or I/O
* error. We can't truly tell if it's from IPMI or not
@@ -1097,15 +1124,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
if (testing_nmi) {
testing_nmi = 2;
- return NOTIFY_STOP;
+ return NMI_HANDLED;
}
/* If we are not expecting a timeout, ignore it. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
- return NOTIFY_OK;
+ return NMI_DONE;
if (preaction_val != WDOG_PRETIMEOUT_NMI)
- return NOTIFY_OK;
+ return NMI_DONE;
/*
* If no one else handled the NMI, we assume it was the IPMI
@@ -1120,12 +1147,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
panic(PFX "pre-timeout");
}
- return NOTIFY_STOP;
+ return NMI_HANDLED;
}
-
-static struct notifier_block ipmi_nmi_handler = {
- .notifier_call = ipmi_nmi
-};
#endif
static int wdog_reboot_handler(struct notifier_block *this,
@@ -1290,7 +1313,8 @@ static void check_parms(void)
}
}
if (do_nmi && !nmi_handler_registered) {
- rv = register_die_notifier(&ipmi_nmi_handler);
+ rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+ "ipmi");
if (rv) {
printk(KERN_WARNING PFX
"Can't register nmi handler\n");
@@ -1298,7 +1322,7 @@ static void check_parms(void)
} else
nmi_handler_registered = 1;
} else if (!do_nmi && nmi_handler_registered) {
- unregister_die_notifier(&ipmi_nmi_handler);
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
nmi_handler_registered = 0;
}
#endif
@@ -1336,7 +1360,7 @@ static int __init ipmi_wdog_init(void)
if (rv) {
#ifdef HAVE_DIE_NMI
if (nmi_handler_registered)
- unregister_die_notifier(&ipmi_nmi_handler);
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
&wdog_panic_notifier);
@@ -1357,7 +1381,7 @@ static void __exit ipmi_wdog_exit(void)
#ifdef HAVE_DIE_NMI
if (nmi_handler_registered)
- unregister_die_notifier(&ipmi_nmi_handler);
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
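
The pretimeout hook moves from a die-notifier to the NMI handler API: the callback now returns NMI_HANDLED or NMI_DONE instead of NOTIFY_STOP/NOTIFY_OK, and registration becomes register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, "ipmi"). A standalone sketch of the callback's decision shape; the state variables and return constants are faked stand-ins:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's NMI handler return codes. */
#define NMI_DONE	0	/* not ours, keep looking */
#define NMI_HANDLED	1	/* consumed the NMI */

enum wdog_state { WDOG_TIMEOUT_NONE, WDOG_TIMEOUT_RESET };

static int testing_nmi;
static enum wdog_state ipmi_watchdog_state = WDOG_TIMEOUT_RESET;

/* Same decision structure as the reworked ipmi_nmi() callback; the
 * real handler also checks the configured pretimeout action. */
static int ipmi_nmi(void)
{
	if (testing_nmi) {
		testing_nmi = 2;
		return NMI_HANDLED;
	}
	if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
		return NMI_DONE;	/* not expecting a timeout */
	return NMI_HANDLED;		/* assume an IPMI pretimeout */
}

int main(void)
{
	printf("%s\n", ipmi_nmi() == NMI_HANDLED ? "handled" : "done");
	return 0;
}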
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index f6595ab..c9e045c 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -5,7 +5,6 @@
menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
- depends on EXPERIMENTAL
select SECURITYFS
---help---
If you have a TPM security chip in your system, which
@@ -43,6 +42,7 @@ config TCG_NSC
config TCG_ATMEL
tristate "Atmel TPM Interface"
+ depends on PPC64 || HAS_IOPORT
---help---
If you have a TPM security chip from Atmel say Yes and it
will be accessible from within Linux. To compile this driver
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 65b9d6f..f7395c4 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -533,10 +533,10 @@ EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
void tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
- struct timeout_t *timeout_cap;
+ unsigned long new_timeout[4];
+ unsigned long old_timeout[4];
struct duration_t *duration_cap;
ssize_t rc;
- u32 timeout;
tpm_cmd.header.in = tpm_getcap_header;
tpm_cmd.params.getcap_in.cap = TPM_CAP_PROP;
@@ -548,24 +548,51 @@ void tpm_get_timeouts(struct tpm_chip *chip)
if (rc)
goto duration;
- if (be32_to_cpu(tpm_cmd.header.out.length)
- != 4 * sizeof(u32))
- goto duration;
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
+ return;
+
+ old_timeout[0] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.a);
+ old_timeout[1] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.b);
+ old_timeout[2] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.c);
+ old_timeout[3] = be32_to_cpu(tpm_cmd.params.getcap_out.cap.timeout.d);
+ memcpy(new_timeout, old_timeout, sizeof(new_timeout));
+
+ /*
+ * Provide ability for vendor overrides of timeout values in case
+ * of misreporting.
+ */
+ if (chip->vendor.update_timeouts != NULL)
+ chip->vendor.timeout_adjusted =
+ chip->vendor.update_timeouts(chip, new_timeout);
+
+ if (!chip->vendor.timeout_adjusted) {
+ /* Don't overwrite default if value is 0 */
+ if (new_timeout[0] != 0 && new_timeout[0] < 1000) {
+ int i;
+
+ /* timeouts in msec rather usec */
+ for (i = 0; i != ARRAY_SIZE(new_timeout); i++)
+ new_timeout[i] *= 1000;
+ chip->vendor.timeout_adjusted = true;
+ }
+ }
+
+ /* Report adjusted timeouts */
+ if (chip->vendor.timeout_adjusted) {
+ dev_info(chip->dev,
+ HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
+ old_timeout[0], new_timeout[0],
+ old_timeout[1], new_timeout[1],
+ old_timeout[2], new_timeout[2],
+ old_timeout[3], new_timeout[3]);
+ }
- timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
- /* Don't overwrite default if value is 0 */
- timeout = be32_to_cpu(timeout_cap->a);
- if (timeout)
- chip->vendor.timeout_a = usecs_to_jiffies(timeout);
- timeout = be32_to_cpu(timeout_cap->b);
- if (timeout)
- chip->vendor.timeout_b = usecs_to_jiffies(timeout);
- timeout = be32_to_cpu(timeout_cap->c);
- if (timeout)
- chip->vendor.timeout_c = usecs_to_jiffies(timeout);
- timeout = be32_to_cpu(timeout_cap->d);
- if (timeout)
- chip->vendor.timeout_d = usecs_to_jiffies(timeout);
+ chip->vendor.timeout_a = usecs_to_jiffies(new_timeout[0]);
+ chip->vendor.timeout_b = usecs_to_jiffies(new_timeout[1]);
+ chip->vendor.timeout_c = usecs_to_jiffies(new_timeout[2]);
+ chip->vendor.timeout_d = usecs_to_jiffies(new_timeout[3]);
duration:
tpm_cmd.header.in = tpm_getcap_header;
@@ -578,23 +605,31 @@ duration:
if (rc)
return;
- if (be32_to_cpu(tpm_cmd.header.out.return_code)
- != 3 * sizeof(u32))
+ if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
+ be32_to_cpu(tpm_cmd.header.out.length)
+ != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
return;
+
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
+ chip->vendor.duration[TPM_MEDIUM] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
+ chip->vendor.duration[TPM_LONG] =
+ usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+
/* The Broadcom BCM0102 chipset in a Dell Latitude D820 gets the above
* value wrong and apparently reports msecs rather than usecs. So we
* fix up the resulting too-small TPM_SHORT value to make things work.
+ * We also scale the TPM_MEDIUM and TPM_LONG values by 1000.
*/
- if (chip->vendor.duration[TPM_SHORT] < (HZ/100))
+ if (chip->vendor.duration[TPM_SHORT] < (HZ / 100)) {
chip->vendor.duration[TPM_SHORT] = HZ;
-
- chip->vendor.duration[TPM_MEDIUM] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_medium));
- chip->vendor.duration[TPM_LONG] =
- usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_long));
+ chip->vendor.duration[TPM_MEDIUM] *= 1000;
+ chip->vendor.duration[TPM_LONG] *= 1000;
+ chip->vendor.duration_adjusted = true;
+ dev_info(chip->dev, "Adjusting TPM timeout parameters.");
+ }
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
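
The rewritten tpm_get_timeouts() adds a units heuristic: if the chip reports timeout A as non-zero but under 1000, the four values are taken to be milliseconds misreported as microseconds and all are scaled by 1000, unless a vendor update_timeouts hook already adjusted them. A minimal sketch of just that heuristic:

#include <stdbool.h>
#include <stdio.h>

#define NUM_TIMEOUTS 4

/* Mirror of the msec-vs-usec heuristic in tpm_get_timeouts():
 * returns true if the values were scaled. */
static bool adjust_timeouts(unsigned long t[NUM_TIMEOUTS])
{
	int i;

	/* Don't touch a zero default; under 1000 usec is implausibly
	 * short, so treat the values as msec and scale them. */
	if (t[0] == 0 || t[0] >= 1000)
		return false;
	for (i = 0; i < NUM_TIMEOUTS; i++)
		t[i] *= 1000;
	return true;
}

int main(void)
{
	unsigned long t[NUM_TIMEOUTS] = { 750, 2000, 750, 750 };

	printf("adjusted: %d, A = %luus\n", adjust_timeouts(t), t[0]);
	return 0;
}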
@@ -603,7 +638,7 @@ void tpm_continue_selftest(struct tpm_chip *chip)
u8 data[] = {
0, 193, /* TPM_TAG_RQU_COMMAND */
0, 0, 0, 10, /* length */
- 0, 0, 0, 83, /* TPM_ORD_GetCapability */
+ 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
};
tpm_transmit(chip, data, sizeof(data));
@@ -866,18 +901,24 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
data = tpm_cmd.params.readpubek_out_buffer;
str +=
sprintf(str,
- "Algorithm: %02X %02X %02X %02X\nEncscheme: %02X %02X\n"
- "Sigscheme: %02X %02X\nParameters: %02X %02X %02X %02X"
- " %02X %02X %02X %02X %02X %02X %02X %02X\n"
- "Modulus length: %d\nModulus: \n",
- data[10], data[11], data[12], data[13], data[14],
- data[15], data[16], data[17], data[22], data[23],
- data[24], data[25], data[26], data[27], data[28],
- data[29], data[30], data[31], data[32], data[33],
- be32_to_cpu(*((__be32 *) (data + 34))));
+ "Algorithm: %02X %02X %02X %02X\n"
+ "Encscheme: %02X %02X\n"
+ "Sigscheme: %02X %02X\n"
+ "Parameters: %02X %02X %02X %02X "
+ "%02X %02X %02X %02X "
+ "%02X %02X %02X %02X\n"
+ "Modulus length: %d\n"
+ "Modulus:\n",
+ data[0], data[1], data[2], data[3],
+ data[4], data[5],
+ data[6], data[7],
+ data[12], data[13], data[14], data[15],
+ data[16], data[17], data[18], data[19],
+ data[20], data[21], data[22], data[23],
+ be32_to_cpu(*((__be32 *) (data + 24))));
for (i = 0; i < 256; i++) {
- str += sprintf(str, "%02X ", data[i + 38]);
+ str += sprintf(str, "%02X ", data[i + 28]);
if ((i + 1) % 16 == 0)
str += sprintf(str, "\n");
}
@@ -940,6 +981,38 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
}
EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
+ssize_t tpm_show_durations(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->vendor.duration[TPM_LONG] == 0)
+ return 0;
+
+ return sprintf(buf, "%d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
+ jiffies_to_usecs(chip->vendor.duration[TPM_LONG]),
+ chip->vendor.duration_adjusted
+ ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_durations);
+
+ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d %d %d %d [%s]\n",
+ jiffies_to_usecs(chip->vendor.timeout_a),
+ jiffies_to_usecs(chip->vendor.timeout_b),
+ jiffies_to_usecs(chip->vendor.timeout_c),
+ jiffies_to_usecs(chip->vendor.timeout_d),
+ chip->vendor.timeout_adjusted
+ ? "adjusted" : "original");
+}
+EXPORT_SYMBOL_GPL(tpm_show_timeouts);
+
ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -1069,12 +1142,13 @@ ssize_t tpm_read(struct file *file, char __user *buf,
ret_size = atomic_read(&chip->data_pending);
atomic_set(&chip->data_pending, 0);
if (ret_size > 0) { /* relay data */
+ ssize_t orig_ret_size = ret_size;
if (size < ret_size)
ret_size = size;
mutex_lock(&chip->buffer_mutex);
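+		/* relay at most 'size' bytes, then wipe the entire pending
+		 * buffer so no stale response data survives this read */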
rc = copy_to_user(buf, chip->data_buffer, ret_size);
- memset(chip->data_buffer, 0, ret_size);
+ memset(chip->data_buffer, 0, orig_ret_size);
if (rc)
ret_size = -EFAULT;
@@ -1267,7 +1341,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
/* Make chip available */
spin_lock(&driver_lock);
- list_add_rcu(&chip->list, &tpm_chip_list);
+ list_add_tail_rcu(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);
return chip;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 72ddb03..befbe67 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,6 +56,10 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_temp_deactivated(struct device *,
struct device_attribute *attr, char *);
+extern ssize_t tpm_show_durations(struct device *,
+ struct device_attribute *attr, char *);
+extern ssize_t tpm_show_timeouts(struct device *,
+ struct device_attribute *attr, char *);
struct tpm_chip;
@@ -67,6 +71,7 @@ struct tpm_vendor_specific {
unsigned long base; /* TPM base address */
int irq;
+ int probed_irq;
int region_size;
int have_region;
@@ -75,13 +80,18 @@ struct tpm_vendor_specific {
int (*send) (struct tpm_chip *, u8 *, size_t);
void (*cancel) (struct tpm_chip *);
u8 (*status) (struct tpm_chip *);
+ bool (*update_timeouts)(struct tpm_chip *chip,
+ unsigned long *timeout_cap);
+
void (*release) (struct device *);
struct miscdevice miscdev;
struct attribute_group *attr_group;
struct list_head list;
int locality;
unsigned long timeout_a, timeout_b, timeout_c, timeout_d; /* jiffies */
+ bool timeout_adjusted;
unsigned long duration[3]; /* jiffies */
+ bool duration_adjusted;
wait_queue_head_t read_queue;
wait_queue_head_t int_queue;
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index a605cb7..4d24648 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -330,12 +330,12 @@ static int __init init_nsc(void)
pdev->dev.driver = &nsc_drv.driver;
pdev->dev.release = tpm_nsc_remove;
- if ((rc = platform_device_register(pdev)) < 0)
- goto err_free_dev;
+ if ((rc = platform_device_add(pdev)) < 0)
+ goto err_put_dev;
if (request_region(base, 2, "tpm_nsc0") == NULL ) {
rc = -EBUSY;
- goto err_unreg_dev;
+ goto err_del_dev;
}
if (!(chip = tpm_register_hardware(&pdev->dev, &tpm_nsc))) {
@@ -382,10 +382,10 @@ static int __init init_nsc(void)
err_rel_reg:
release_region(base, 2);
-err_unreg_dev:
- platform_device_unregister(pdev);
-err_free_dev:
- kfree(pdev);
+err_del_dev:
+ platform_device_del(pdev);
+err_put_dev:
+ platform_device_put(pdev);
err_unreg_drv:
platform_driver_unregister(&nsc_drv);
return rc;
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void)
if (pdev) {
tpm_nsc_remove(&pdev->dev);
platform_device_unregister(pdev);
- kfree(pdev);
- pdev = NULL;
}
platform_driver_unregister(&nsc_drv);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index dd21df5..f8e94fe 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/acpi.h>
+#include <linux/freezer.h>
#include "tpm.h"
#define TPM_HEADER_SIZE 10
@@ -79,7 +80,7 @@ enum tis_defaults {
static LIST_HEAD(tis_chips);
static DEFINE_SPINLOCK(tis_lock);
-#ifdef CONFIG_ACPI
+#if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
static int is_itpm(struct pnp_dev *dev)
{
struct acpi_device *acpi = pnp_acpi_device(dev);
@@ -93,7 +94,7 @@ static int is_itpm(struct pnp_dev *dev)
return 0;
}
#else
-static int is_itpm(struct pnp_dev *dev)
+static inline int is_itpm(struct pnp_dev *dev)
{
return 0;
}
@@ -120,7 +121,7 @@ static void release_locality(struct tpm_chip *chip, int l, int force)
static int request_locality(struct tpm_chip *chip, int l)
{
- unsigned long stop;
+ unsigned long stop, timeout;
long rc;
if (check_locality(chip, l) >= 0)
@@ -129,17 +130,25 @@ static int request_locality(struct tpm_chip *chip, int l)
iowrite8(TPM_ACCESS_REQUEST_USE,
chip->vendor.iobase + TPM_ACCESS(l));
+ stop = jiffies + chip->vendor.timeout_a;
+
if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -1;
rc = wait_event_interruptible_timeout(chip->vendor.int_queue,
(check_locality
(chip, l) >= 0),
- chip->vendor.timeout_a);
+ timeout);
if (rc > 0)
return l;
-
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
} else {
/* wait for burstcount */
- stop = jiffies + chip->vendor.timeout_a;
do {
if (check_locality(chip, l) >= 0)
return l;
@@ -196,15 +205,24 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
if ((status & mask) == mask)
return 0;
+ stop = jiffies + timeout;
+
if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
rc = wait_event_interruptible_timeout(*queue,
((tpm_tis_status
(chip) & mask) ==
mask), timeout);
if (rc > 0)
return 0;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
} else {
- stop = jiffies + timeout;
do {
msleep(TPM_TIMEOUT);
status = tpm_tis_status(chip);
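
Both request_locality() and wait_for_stat() now share the same freezer-aware wait pattern: recompute the remaining budget from a fixed deadline and retry when the interruptible sleep was broken only by the freezer. A condensed sketch of that pattern (kernel context assumed; the cond() callback is hypothetical):

	/* Hypothetical condensed form of the retry loop used above. */
	static int wait_cond_freezable(struct tpm_chip *chip,
				       wait_queue_head_t *q,
				       bool (*cond)(struct tpm_chip *),
				       unsigned long timeout)
	{
		unsigned long stop = jiffies + timeout;
		long rc, remain;

		for (;;) {
			remain = stop - jiffies;
			if (remain <= 0)
				return -ETIME;	/* deadline exhausted */
			rc = wait_event_interruptible_timeout(*q, cond(chip),
							      remain);
			if (rc > 0)
				return 0;	/* condition became true */
			if (rc == -ERESTARTSYS && freezing(current)) {
				/* woken only by the freezer: drop the fake
				 * signal and wait out the remaining budget */
				clear_thread_flag(TIF_SIGPENDING);
				continue;
			}
			return rc < 0 ? rc : -ETIME;
		}
	}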
@@ -288,11 +306,10 @@ MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc, status, burstcnt;
size_t count = 0;
- u32 ordinal;
if (request_locality(chip, 0) < 0)
return -EBUSY;
@@ -327,8 +344,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
/* write last byte */
iowrite8(buf[count],
- chip->vendor.iobase +
- TPM_DATA_FIFO(chip->vendor.locality));
+ chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
&chip->vendor.int_queue);
status = tpm_tis_status(chip);
@@ -337,6 +353,28 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
goto out_err;
}
+ return 0;
+
+out_err:
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+ return rc;
+}
+
+/*
+ * If interrupts are used (signaled by an irq set in the vendor structure)
+ * tpm.c can skip polling for the data to be available as the interrupt is
+ * waited for here
+ */
+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc;
+ u32 ordinal;
+
+ rc = tpm_tis_send_data(chip, buf, len);
+ if (rc < 0)
+ return rc;
+
/* go and do it */
iowrite8(TPM_STS_GO,
chip->vendor.iobase + TPM_STS(chip->vendor.locality));
@@ -358,6 +396,80 @@ out_err:
return rc;
}
+struct tis_vendor_timeout_override {
+ u32 did_vid;
+ unsigned long timeout_us[4];
+};
+
+static const struct tis_vendor_timeout_override vendor_timeout_overrides[] = {
+ /* Atmel 3204 */
+ { 0x32041114, { (TIS_SHORT_TIMEOUT*1000), (TIS_LONG_TIMEOUT*1000),
+ (TIS_SHORT_TIMEOUT*1000), (TIS_SHORT_TIMEOUT*1000) } },
+};
+
+static bool tpm_tis_update_timeouts(struct tpm_chip *chip,
+ unsigned long *timeout_cap)
+{
+ int i;
+ u32 did_vid;
+
+ did_vid = ioread32(chip->vendor.iobase + TPM_DID_VID(0));
+
+ for (i = 0; i != ARRAY_SIZE(vendor_timeout_overrides); i++) {
+ if (vendor_timeout_overrides[i].did_vid != did_vid)
+ continue;
+ memcpy(timeout_cap, vendor_timeout_overrides[i].timeout_us,
+ sizeof(vendor_timeout_overrides[i].timeout_us));
+ return true;
+ }
+
+ return false;
+}
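+
+/*
+ * Note: per the TIS spec, TPM_DID_VID packs the 16-bit device ID in the
+ * upper half and the TCG vendor ID in the lower half, so the 0x32041114
+ * entry above matches DID 0x3204, VID 0x1114 (Atmel).
+ */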
+
+/*
+ * Early probing for iTPM with STS_DATA_EXPECT flaw.
+ * Try sending a command without the itpm flag set and, if that
+ * fails, repeat with the itpm flag set.
+ * Returns 0 if the chip is not an iTPM, 1 if it is, or a negative
+ * error code on failure.
+ */
+static int probe_itpm(struct tpm_chip *chip)
+{
+ int rc = 0;
+ u8 cmd_getticks[] = {
+ 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a,
+ 0x00, 0x00, 0x00, 0xf1
+ };
+ size_t len = sizeof(cmd_getticks);
+ int rem_itpm = itpm;
+
+ itpm = 0;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0)
+ goto out;
+
+ tpm_tis_ready(chip);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ itpm = 1;
+
+ rc = tpm_tis_send_data(chip, cmd_getticks, len);
+ if (rc == 0) {
+ dev_info(chip->dev, "Detected an iTPM.\n");
+ rc = 1;
+	} else {
+		rc = -EFAULT;
+	}
+
+out:
+ itpm = rem_itpm;
+ tpm_tis_ready(chip);
+	/* some TPMs need a break here, otherwise they will not work
+	 * correctly on the immediately subsequent command */
+ msleep(chip->vendor.timeout_b);
+ release_locality(chip, chip->vendor.locality, 0);
+
+ return rc;
+}
+
static const struct file_operations tis_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -376,6 +488,8 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
NULL);
static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
static struct attribute *tis_attrs[] = {
&dev_attr_pubek.attr,
@@ -385,7 +499,9 @@ static struct attribute *tis_attrs[] = {
&dev_attr_owned.attr,
&dev_attr_temp_deactivated.attr,
&dev_attr_caps.attr,
- &dev_attr_cancel.attr, NULL,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr, NULL,
};
static struct attribute_group tis_attr_grp = {
@@ -397,6 +513,7 @@ static struct tpm_vendor_specific tpm_tis = {
.recv = tpm_tis_recv,
.send = tpm_tis_send,
.cancel = tpm_tis_ready,
+ .update_timeouts = tpm_tis_update_timeouts,
.req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
.req_canceled = TPM_STS_COMMAND_READY,
@@ -416,7 +533,7 @@ static irqreturn_t tis_int_probe(int irq, void *dev_id)
if (interrupt == 0)
return IRQ_NONE;
- chip->vendor.irq = irq;
+ chip->vendor.probed_irq = irq;
/* Clear interrupts handled with TPM_EOI */
iowrite32(interrupt,
@@ -464,7 +581,7 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
resource_size_t len, unsigned int irq)
{
u32 vendor, intfcaps, intmask;
- int rc, i;
+ int rc, i, irq_s, irq_e;
struct tpm_chip *chip;
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
@@ -493,6 +610,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
"1.2 TPM (device-id 0x%X, rev-id %d)\n",
vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
+ if (!itpm) {
+ itpm = probe_itpm(chip);
+ if (itpm < 0) {
+ rc = -ENODEV;
+ goto out_err;
+ }
+ }
+
if (itpm)
dev_info(dev, "Intel iTPM workaround enabled\n");
@@ -522,6 +647,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");
+ /* get the timeouts before testing for irqs */
+ tpm_get_timeouts(chip);
+
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
@@ -540,13 +668,19 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (interrupts)
chip->vendor.irq = irq;
if (interrupts && !chip->vendor.irq) {
- chip->vendor.irq =
+ irq_s =
ioread8(chip->vendor.iobase +
TPM_INT_VECTOR(chip->vendor.locality));
+ if (irq_s) {
+ irq_e = irq_s;
+ } else {
+ irq_s = 3;
+ irq_e = 15;
+ }
- for (i = 3; i < 16 && chip->vendor.irq == 0; i++) {
+ for (i = irq_s; i <= irq_e && chip->vendor.irq == 0; i++) {
iowrite8(i, chip->vendor.iobase +
- TPM_INT_VECTOR(chip->vendor.locality));
+ TPM_INT_VECTOR(chip->vendor.locality));
if (request_irq
(i, tis_int_probe, IRQF_SHARED,
chip->vendor.miscdev.name, chip) != 0) {
@@ -568,9 +702,22 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
+ chip->vendor.probed_irq = 0;
+
/* Generate Interrupts */
tpm_gen_interrupt(chip);
+ chip->vendor.irq = chip->vendor.probed_irq;
+
+	/* free_irq will call into tis_int_probe;
+	   clear any pending interrupts we haven't
+	   handled during tpm_gen_interrupt */
+ iowrite32(ioread32
+ (chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality)),
+ chip->vendor.iobase +
+ TPM_INT_STATUS(chip->vendor.locality));
+
/* Turn off */
iowrite32(intmask,
chip->vendor.iobase +
@@ -609,7 +756,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
list_add(&chip->vendor.list, &tis_chips);
spin_unlock(&tis_lock);
- tpm_get_timeouts(chip);
tpm_continue_selftest(chip);
return 0;
@@ -619,6 +765,29 @@ out_err:
tpm_remove_hardware(chip->dev);
return rc;
}
+
+static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
+{
+ u32 intmask;
+
+ /* reenable interrupts that device may have lost or
+ BIOS/firmware may have disabled */
+ iowrite8(chip->vendor.irq, chip->vendor.iobase +
+ TPM_INT_VECTOR(chip->vendor.locality));
+
+ intmask =
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+
+ intmask |= TPM_INTF_CMD_READY_INT
+ | TPM_INTF_LOCALITY_CHANGE_INT | TPM_INTF_DATA_AVAIL_INT
+ | TPM_INTF_STS_VALID_INT | TPM_GLOBAL_INT_ENABLE;
+
+ iowrite32(intmask,
+ chip->vendor.iobase + TPM_INT_ENABLE(chip->vendor.locality));
+}
+
+
#ifdef CONFIG_PNP
static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
const struct pnp_device_id *pnp_id)
@@ -650,6 +819,9 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
struct tpm_chip *chip = pnp_get_drvdata(dev);
int ret;
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
ret = tpm_pm_resume(&dev->dev);
if (!ret)
tpm_continue_selftest(chip);
@@ -702,6 +874,11 @@ static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
static int tpm_tis_resume(struct platform_device *dev)
{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+
+ if (chip->vendor.irq)
+ tpm_tis_reenable_interrupts(chip);
+
return tpm_pm_resume(&dev->dev);
}
static struct platform_driver tis_drv = {
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 39ccdea..e90e1c7 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -621,7 +621,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
drvdata->mem_start = regs_res->start;
drvdata->mem_end = regs_res->end;
- drvdata->mem_size = regs_res->end - regs_res->start + 1;
+ drvdata->mem_size = resource_size(regs_res);
if (!request_mem_region(drvdata->mem_start,
drvdata->mem_size, DRIVER_NAME)) {
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4168c88..3530927 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -2,3 +2,6 @@
config CLKDEV_LOOKUP
bool
select HAVE_CLK
+
+config HAVE_MACH_CLKDEV
+ bool
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 96c9219..999d6a0 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -1,5 +1,32 @@
config CLKSRC_I8253
bool
+config CLKEVT_I8253
+ bool
+
+config I8253_LOCK
+ bool
+
+config CLKBLD_I8253
+ def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
+
config CLKSRC_MMIO
bool
+
+config DW_APB_TIMER
+ bool
+
+config CLKSRC_DBX500_PRCMU
+ bool "Clocksource PRCMU Timer"
+ depends on UX500_SOC_DB5500 || UX500_SOC_DB8500
+ default y
+ help
+	  Use the always-on PRCMU Timer as the clocksource
+
+config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
+ bool "Clocksource PRCMU Timer sched_clock"
+ depends on (CLKSRC_DBX500_PRCMU && !NOMADIK_MTU_SCHED_CLOCK)
+ select HAVE_SCHED_CLOCK
+ default y
+ help
+	  Use the always-on PRCMU Timer as sched_clock
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index b995942..8d81a1d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,5 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
-obj-$(CONFIG_CLKSRC_I8253) += i8253.o
+obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
+obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
+obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
\ No newline at end of file
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 225c176..27c49e6 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -1,14 +1,25 @@
/*
* i8253 PIT clocksource
*/
-#include <linux/clocksource.h>
+#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
+#include <linux/module.h>
+#include <linux/i8253.h>
+#include <linux/smp.h>
-#include <asm/i8253.h>
+/*
+ * Protects access to I/O ports
+ *
+ * 0040-0043 : timer0, i8253 / i8254
+ * 0061-0061 : NMI Control Register which contains two speaker control bits.
+ */
+DEFINE_RAW_SPINLOCK(i8253_lock);
+EXPORT_SYMBOL(i8253_lock);
+#ifdef CONFIG_CLKSRC_I8253
/*
 * Since the PIT overflows every tick, it's not very useful
* to just read by itself. So use jiffies to emulate a free
@@ -37,15 +48,15 @@ static cycle_t i8253_read(struct clocksource *cs)
* count), it cannot be newer.
*/
jifs = jiffies;
- outb_pit(0x00, PIT_MODE); /* latch the count ASAP */
- count = inb_pit(PIT_CH0); /* read the latched count */
- count |= inb_pit(PIT_CH0) << 8;
+ outb_p(0x00, PIT_MODE); /* latch the count ASAP */
+ count = inb_p(PIT_CH0); /* read the latched count */
+ count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
if (count > LATCH) {
- outb_pit(0x34, PIT_MODE);
- outb_pit(PIT_LATCH & 0xff, PIT_CH0);
- outb_pit(PIT_LATCH >> 8, PIT_CH0);
+ outb_p(0x34, PIT_MODE);
+ outb_p(PIT_LATCH & 0xff, PIT_CH0);
+ outb_p(PIT_LATCH >> 8, PIT_CH0);
count = PIT_LATCH - 1;
}
@@ -86,3 +97,90 @@ int __init clocksource_i8253_init(void)
{
return clocksource_register_hz(&i8253_cs, PIT_TICK_RATE);
}
+#endif
+
+#ifdef CONFIG_CLKEVT_I8253
+/*
+ * Initialize the PIT timer.
+ *
+ * This is also called after resume to bring the PIT into operation again.
+ */
+static void init_pit_timer(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ raw_spin_lock(&i8253_lock);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ /* binary, mode 2, LSB/MSB, ch 0 */
+ outb_p(0x34, PIT_MODE);
+		outb_p(LATCH & 0xff, PIT_CH0);	/* LSB */
+		outb_p(LATCH >> 8, PIT_CH0);	/* MSB */
+ break;
+
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ if (evt->mode == CLOCK_EVT_MODE_PERIODIC ||
+ evt->mode == CLOCK_EVT_MODE_ONESHOT) {
+ outb_p(0x30, PIT_MODE);
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* One shot setup */
+ outb_p(0x38, PIT_MODE);
+ break;
+
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here */
+ break;
+ }
+ raw_spin_unlock(&i8253_lock);
+}
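+
+/*
+ * For reference, the control words written to PIT_MODE above decode as
+ * (see the 8253/8254 datasheet): bits 7-6 counter select, bits 5-4
+ * access mode, bits 3-1 operating mode, bit 0 BCD. Hence 0x34 = ch0,
+ * LSB/MSB, mode 2 (rate generator, periodic); 0x38 = ch0, LSB/MSB,
+ * mode 4 (software strobe, used for oneshot); 0x30 = ch0, LSB/MSB,
+ * mode 0 (interrupt on terminal count, used here to stop the counter).
+ */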
+
+/*
+ * Program the next event in oneshot mode
+ *
+ * Delta is given in PIT ticks
+ */
+static int pit_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ raw_spin_lock(&i8253_lock);
+	outb_p(delta & 0xff, PIT_CH0);	/* LSB */
+	outb_p(delta >> 8, PIT_CH0);	/* MSB */
+ raw_spin_unlock(&i8253_lock);
+
+ return 0;
+}
+
+/*
+ * On UP the PIT can serve all of the possible timer functions. On SMP systems
+ * it can be solely used for the global tick.
+ */
+struct clock_event_device i8253_clockevent = {
+ .name = "pit",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_mode = init_pit_timer,
+ .set_next_event = pit_next_event,
+};
+
+/*
+ * Initialize the conversion factor and the min/max deltas of the clock event
+ * structure and register the clock event source with the framework.
+ */
+void __init clockevent_i8253_init(bool oneshot)
+{
+ if (oneshot)
+ i8253_clockevent.features |= CLOCK_EVT_FEAT_ONESHOT;
+ /*
+ * Start pit with the boot cpu mask. x86 might make it global
+ * when it is used as broadcast device later.
+ */
+ i8253_clockevent.cpumask = cpumask_of(smp_processor_id());
+
+ clockevents_config_and_register(&i8253_clockevent, PIT_TICK_RATE,
+ 0xF, 0x7FFF);
+}
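+
+/*
+ * For scale: with PIT_TICK_RATE = 1193182 Hz one tick is ~838 ns, so the
+ * 0xF..0x7FFF delta range registered above corresponds to roughly
+ * 12.6 us .. 27.5 ms; the clockevents core converts nanosecond requests
+ * into PIT ticks using the rate given here before calling
+ * pit_next_event().
+ */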
+#endif
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index dc7c033..ca09bc4 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -26,10 +26,12 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct sh_cmt_priv {
void __iomem *mapbase;
@@ -150,13 +152,13 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
- int ret;
+ int k, ret;
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
- return ret;
+ goto err0;
}
/* make sure channel is disabled */
@@ -174,9 +176,38 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
sh_cmt_write(p, CMCOR, 0xffffffff);
sh_cmt_write(p, CMCNT, 0);
+ /*
+ * According to the sh73a0 user's manual, as CMCNT can be operated
+ * only by the RCLK (Pseudo 32 KHz), there's one restriction on
+	 * modifying the CMCNT register; two RCLK cycles are necessary before
+ * this register is either read or any modification of the value
+ * it holds is reflected in the LSI's actual operation.
+ *
+ * While at it, we're supposed to clear out the CMCNT as of this
+ * moment, so make sure it's processed properly here. This will
+ * take RCLKx2 at maximum.
+ */
+ for (k = 0; k < 100; k++) {
+ if (!sh_cmt_read(p, CMCNT))
+ break;
+ udelay(1);
+ }
+
+ if (sh_cmt_read(p, CMCNT)) {
+ dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
+ ret = -ETIMEDOUT;
+ goto err1;
+ }
+
/* enable channel */
sh_cmt_start_stop_ch(p, 1);
return 0;
+ err1:
+ /* stop clock */
+ clk_disable(p->clk);
+
+ err0:
+ return ret;
}
static void sh_cmt_disable(struct sh_cmt_priv *p)
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 40630cb..db8d595 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -30,6 +30,7 @@
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct sh_mtu2_priv {
void __iomem *mapbase;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 8081357..079e96a 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -31,6 +31,7 @@
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
+#include <linux/module.h>
struct sh_tmu_priv {
void __iomem *mapbase;
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 2b46a7e..094a710 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -28,12 +28,26 @@
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
-#include <asm/atomic.h>
-#include <asm/unaligned.h>
+#include <linux/ptrace.h>
+#include <linux/atomic.h>
#include <linux/cn_proc.h>
-#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
+/*
+ * Size of a cn_msg followed by a proc_event structure. Since the
+ * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
+ * add one 4-byte word to the size here, and then start the actual
+ * cn_msg structure 4 bytes into the stack buffer. The result is that
+ * the immediately following proc_event structure is aligned to 8 bytes.
+ */
+#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
+
+/* See comment above; we test our assumption about sizeof struct cn_msg here. */
+static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
+{
+ BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
+ return (struct cn_msg *)(buffer + 4);
+}
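+
+/*
+ * Worked example: with the buffer 8-byte aligned, the cn_msg header
+ * occupies offsets 4..23, so the proc_event payload starts at offset
+ * 4 + 20 = 24 -- a multiple of 8, as required by its __u64
+ * timestamp_ns member.
+ */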
static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
@@ -53,26 +67,32 @@ void proc_fork_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
+ struct task_struct *parent;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_FORK;
- ev->event_data.fork.parent_pid = task->real_parent->pid;
- ev->event_data.fork.parent_tgid = task->real_parent->tgid;
+ rcu_read_lock();
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.fork.parent_pid = parent->pid;
+ ev->event_data.fork.parent_tgid = parent->tgid;
+ rcu_read_unlock();
ev->event_data.fork.child_pid = task->pid;
ev->event_data.fork.child_tgid = task->tgid;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
/* If cn_netlink_send() failed, the data is not sent */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -82,16 +102,17 @@ void proc_exec_connector(struct task_struct *task)
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_EXEC;
ev->event_data.exec.process_pid = task->pid;
ev->event_data.exec.process_tgid = task->tgid;
@@ -99,6 +120,7 @@ void proc_exec_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -106,15 +128,16 @@ void proc_id_connector(struct task_struct *task, int which_id)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
const struct cred *cred;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
@@ -133,11 +156,12 @@ void proc_id_connector(struct task_struct *task, int which_id)
rcu_read_unlock();
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -146,16 +170,17 @@ void proc_sid_connector(struct task_struct *task)
struct cn_msg *msg;
struct proc_event *ev;
struct timespec ts;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg *)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_SID;
ev->event_data.sid.process_pid = task->pid;
ev->event_data.sid.process_tgid = task->tgid;
@@ -163,6 +188,70 @@ void proc_sid_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
+void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+ if (ptrace_id == PTRACE_ATTACH) {
+ ev->event_data.ptrace.tracer_pid = current->pid;
+ ev->event_data.ptrace.tracer_tgid = current->tgid;
+ } else if (ptrace_id == PTRACE_DETACH) {
+ ev->event_data.ptrace.tracer_pid = 0;
+ ev->event_data.ptrace.tracer_tgid = 0;
+	} else {
+		return;
+	}
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
+void proc_comm_connector(struct task_struct *task)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = buffer_to_cn_msg(buffer);
+ ev = (struct proc_event *)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ ev->timestamp_ns = timespec_to_ns(&ts);
+ ev->what = PROC_EVENT_COMM;
+ ev->event_data.comm.process_pid = task->pid;
+ ev->event_data.comm.process_tgid = task->tgid;
+ get_task_comm(ev->event_data.comm.comm, task);
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -170,17 +259,18 @@ void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->what = PROC_EVENT_EXIT;
ev->event_data.exit.process_pid = task->pid;
ev->event_data.exit.process_tgid = task->tgid;
@@ -190,6 +280,7 @@ void proc_exit_connector(struct task_struct *task)
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -205,23 +296,25 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
struct cn_msg *msg;
struct proc_event *ev;
- __u8 buffer[CN_PROC_MSG_SIZE];
+ __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
struct timespec ts;
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
+ msg = buffer_to_cn_msg(buffer);
ev = (struct proc_event*)msg->data;
+ memset(&ev->event_data, 0, sizeof(ev->event_data));
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
- put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->timestamp_ns = timespec_to_ns(&ts);
ev->cpu = -1;
ev->what = PROC_EVENT_NONE;
ev->event_data.ack.err = err;
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = rcvd_ack + 1;
msg->len = sizeof(*ev);
+ msg->flags = 0; /* not used */
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
@@ -238,6 +331,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
if (msg->len != sizeof(*mc_op))
return;
+ /* Can only change if privileged. */
+ if (!capable(CAP_NET_ADMIN)) {
+ err = EPERM;
+ goto out;
+ }
+
mc_op = (enum proc_cn_mcast_op*)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
@@ -250,6 +349,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
err = EINVAL;
break;
}
+
+out:
cn_proc_ack(err, msg->seq, msg->ack);
}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0f..ea6efe8 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
static void cn_rx_skb(struct sk_buff *__skb)
{
struct nlmsghdr *nlh;
- int err;
struct sk_buff *skb;
+ int len, err;
skb = skb_get(__skb);
if (skb->len >= NLMSG_SPACE(0)) {
nlh = nlmsg_hdr(skb);
+ len = nlmsg_len(nlh);
- if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+ if (len < (int)sizeof(struct cn_msg) ||
skb->len < nlh->nlmsg_len ||
- nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+ len > CONNECTOR_MAX_MSG_SIZE) {
kfree_skb(skb);
return;
}
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 676d957..4159265 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -62,10 +62,22 @@
#define CAAM_MAX_IV_LENGTH 16
/* length of descriptors text */
-#define DESC_AEAD_SHARED_TEXT_LEN 4
-#define DESC_AEAD_ENCRYPT_TEXT_LEN 21
-#define DESC_AEAD_DECRYPT_TEXT_LEN 24
-#define DESC_AEAD_GIVENCRYPT_TEXT_LEN 27
+#define DESC_JOB_IO_LEN (CAAM_CMD_SZ * 3 + CAAM_PTR_SZ * 3)
+
+#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+
+#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+ 20 * CAAM_CMD_SZ)
+#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+ 15 * CAAM_CMD_SZ)
+
+#define DESC_MAX_USED_BYTES (DESC_AEAD_GIVENC_LEN + \
+ CAAM_MAX_KEY_SIZE)
+#define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
#ifdef DEBUG
/* for print_hex_dumps with line references */
@@ -76,30 +88,366 @@
#define debug(format, arg...)
#endif
+/* Set DK bit in class 1 operation if shared */
+static inline void append_dec_op1(u32 *desc, u32 type)
+{
+ u32 *jump_cmd, *uncond_jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT);
+ uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_operation(desc, type | OP_ALG_AS_INITFINAL |
+ OP_ALG_DECRYPT | OP_ALG_AAI_DK);
+ set_jump_tgt_here(desc, uncond_jump_cmd);
+}
+
+/*
+ * Wait for completion of class 1 key loading before allowing
+ * error propagation
+ */
+static inline void append_dec_shr_done(u32 *desc)
+{
+ u32 *jump_cmd;
+
+ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+/*
+ * For aead functions, read payload and write payload,
+ * both of which are specified in req->src and req->dst
+ */
+static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
+{
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
+ KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * For aead encrypt and decrypt, read iv for both classes
+ */
+static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+{
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | ivsize);
+ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+}
+
+/*
+ * For ablkcipher encrypt and decrypt, read from req->src and
+ * write to req->dst
+ */
+static inline void ablkcipher_append_src_dst(u32 *desc)
+{
+	append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 |
+			     KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+}
+
+/*
+ * If all data, including src (with assoc and iv) or dst (with iv only) are
+ * contiguous
+ */
+#define GIV_SRC_CONTIG 1
+#define GIV_DST_CONTIG (1 << 1)
+
/*
* per-session context
*/
struct caam_ctx {
struct device *jrdev;
- u32 *sh_desc;
- dma_addr_t shared_desc_phys;
+ u32 sh_desc_enc[DESC_MAX_USED_LEN];
+ u32 sh_desc_dec[DESC_MAX_USED_LEN];
+ u32 sh_desc_givenc[DESC_MAX_USED_LEN];
+ dma_addr_t sh_desc_enc_dma;
+ dma_addr_t sh_desc_dec_dma;
+ dma_addr_t sh_desc_givenc_dma;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
- u8 *key;
- dma_addr_t key_phys;
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t key_dma;
unsigned int enckeylen;
unsigned int split_key_len;
unsigned int split_key_pad_len;
unsigned int authsize;
};
-static int aead_authenc_setauthsize(struct crypto_aead *authenc,
+static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
+{
+ if (keys_fit_inline) {
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key_as_imm(desc, (void *)ctx->key +
+ ctx->split_key_pad_len, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ } else {
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
+ ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ }
+}
+
+static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
+ int keys_fit_inline)
+{
+ u32 *key_jump_cmd;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = 0;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 geniv, moveiv;
+ u32 *desc;
+
+ if (!ctx->enckeylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+ append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+ /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ append_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Only propagate error immediately if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ aead_append_ld_iv(desc, tfm->ivsize);
+
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len + ctx->enckeylen <=
+ CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = 1;
+
+ /* aead_givencrypt shared descriptor */
+ desc = ctx->sh_desc_givenc;
+
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+
+ /* Generate IV */
+ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+ NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+ NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO |
+ MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Copy IV to class 1 context */
+ append_move(desc, MOVE_SRC_CLASS1CTX |
+ MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+
+ /* Return to encryption */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* ivsize + cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /* assoclen = seqinlen - (ivsize + cryptlen) */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+	/* Copy iv from class 1 ctx to class 2 fifo */
+ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+ append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
+ LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+ append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
+
+ /* Class 1 operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Will write ivsize + cryptlen */
+ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* No need to reload the iv */
+ append_seq_fifo_load(desc, tfm->ivsize,
+ FIFOLD_CLASS_SKIP);
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead givenc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
+}
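+
+/*
+ * Worked example for the keys_fit_inline tests above (hypothetical key
+ * sizes, 64-bit DMA addresses): DESC_AEAD_ENC_LEN = 20 * CAAM_CMD_SZ =
+ * 80 bytes and DESC_JOB_IO_LEN = 3 * 4 + 3 * 8 = 36 bytes; with a
+ * 64-byte padded split key and a 16-byte AES key the total is 196
+ * bytes, under the 256-byte (64-word) descriptor buffer, so both keys
+ * are embedded in the shared descriptor rather than referenced by DMA
+ * address.
+ */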
+
+static int aead_setauthsize(struct crypto_aead *authenc,
unsigned int authsize)
{
struct caam_ctx *ctx = crypto_aead_ctx(authenc);
ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
return 0;
}
@@ -117,6 +465,7 @@ static void split_key_done(struct device *dev, u32 *desc, u32 err,
#ifdef DEBUG
dev_err(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
+
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -220,73 +569,7 @@ static u32 gen_split_key(struct caam_ctx *ctx, const u8 *key_in, u32 authkeylen)
return ret;
}
-static int build_sh_desc_ipsec(struct caam_ctx *ctx)
-{
- struct device *jrdev = ctx->jrdev;
- u32 *sh_desc;
- u32 *jump_cmd;
- bool keys_fit_inline = 0;
-
- /*
- * largest Job Descriptor and its Shared Descriptor
- * must both fit into the 64-word Descriptor h/w Buffer
- */
- if ((DESC_AEAD_GIVENCRYPT_TEXT_LEN +
- DESC_AEAD_SHARED_TEXT_LEN) * CAAM_CMD_SZ +
- ctx->split_key_pad_len + ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
- keys_fit_inline = 1;
-
- /* build shared descriptor for this session */
- sh_desc = kmalloc(CAAM_CMD_SZ * DESC_AEAD_SHARED_TEXT_LEN +
- (keys_fit_inline ?
- ctx->split_key_pad_len + ctx->enckeylen :
- CAAM_PTR_SZ * 2), GFP_DMA | GFP_KERNEL);
- if (!sh_desc) {
- dev_err(jrdev, "could not allocate shared descriptor\n");
- return -ENOMEM;
- }
-
- init_sh_desc(sh_desc, HDR_SAVECTX | HDR_SHARE_SERIAL);
-
- jump_cmd = append_jump(sh_desc, CLASS_BOTH | JUMP_TEST_ALL |
- JUMP_COND_SHRD | JUMP_COND_SELF);
-
- /*
- * process keys, starting with class 2/authentication.
- */
- if (keys_fit_inline) {
- append_key_as_imm(sh_desc, ctx->key, ctx->split_key_pad_len,
- ctx->split_key_len,
- CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC);
-
- append_key_as_imm(sh_desc, (void *)ctx->key +
- ctx->split_key_pad_len, ctx->enckeylen,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- } else {
- append_key(sh_desc, ctx->key_phys, ctx->split_key_len, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- append_key(sh_desc, ctx->key_phys + ctx->split_key_pad_len,
- ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
- }
-
- /* update jump cmd now that we are at the jump target */
- set_jump_tgt_here(sh_desc, jump_cmd);
-
- ctx->shared_desc_phys = dma_map_single(jrdev, sh_desc,
- desc_bytes(sh_desc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->shared_desc_phys)) {
- dev_err(jrdev, "unable to map shared descriptor\n");
- kfree(sh_desc);
- return -ENOMEM;
- }
-
- ctx->sh_desc = sh_desc;
-
- return 0;
-}
-
-static int aead_authenc_setkey(struct crypto_aead *aead,
+static int aead_setkey(struct crypto_aead *aead,
const u8 *key, unsigned int keylen)
{
/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
@@ -326,27 +609,19 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
#endif
- ctx->key = kmalloc(ctx->split_key_pad_len + enckeylen,
- GFP_KERNEL | GFP_DMA);
- if (!ctx->key) {
- dev_err(jrdev, "could not allocate key output memory\n");
- return -ENOMEM;
- }
ret = gen_split_key(ctx, key, authkeylen);
if (ret) {
- kfree(ctx->key);
goto badkey;
}
/* postpend encryption key to auth split key */
memcpy(ctx->key + ctx->split_key_pad_len, key + authkeylen, enckeylen);
- ctx->key_phys = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, ctx->key_phys)) {
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
dev_err(jrdev, "unable to map key i/o memory\n");
- kfree(ctx->key);
return -ENOMEM;
}
#ifdef DEBUG
@@ -357,11 +632,10 @@ static int aead_authenc_setkey(struct crypto_aead *aead,
ctx->enckeylen = enckeylen;
- ret = build_sh_desc_ipsec(ctx);
+ ret = aead_set_sh_desc(aead);
if (ret) {
- dma_unmap_single(jrdev, ctx->key_phys, ctx->split_key_pad_len +
+ dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
enckeylen, DMA_TO_DEVICE);
- kfree(ctx->key);
}
return ret;
@@ -370,6 +644,119 @@ badkey:
return -EINVAL;
}
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
+ struct device *jrdev = ctx->jrdev;
+ int ret = 0;
+ u32 *key_jump_cmd, *jump_cmd;
+ u32 *desc;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "key in @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+ memcpy(ctx->key, key, keylen);
+ ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->key_dma)) {
+ dev_err(jrdev, "unable to map key i/o memory\n");
+ return -ENOMEM;
+ }
+ ctx->enckeylen = keylen;
+
+ /* ablkcipher_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Propagate errors from shared to job descriptor */
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+
+ /* Load iv */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Load operation */
+ append_operation(desc, ctx->class1_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher enc shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+ /* ablkcipher_decrypt shared descriptor */
+ desc = ctx->sh_desc_dec;
+
+ init_sh_desc(desc, HDR_SHARE_WAIT);
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ /* Load class1 key only */
+ append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+ ctx->enckeylen, CLASS_1 |
+ KEY_DEST_CLASS_REG);
+
+	/* As in the aead descriptors, only propagate the error immediately
+	 * if shared */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, key_jump_cmd);
+ append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ set_jump_tgt_here(desc, jump_cmd);
+
+ /* load IV */
+ append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB | tfm->ivsize);
+
+ /* Choose operation */
+ append_dec_op1(desc, ctx->class1_alg_type);
+
+ /* Perform operation */
+ ablkcipher_append_src_dst(desc);
+
+ /* Wait for key to load before allowing propagating error */
+ append_dec_shr_done(desc);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher dec shdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return ret;
+}
+
struct link_tbl_entry {
u64 ptr;
u32 len;
@@ -379,64 +766,109 @@ struct link_tbl_entry {
};
/*
- * ipsec_esp_edesc - s/w-extended ipsec_esp descriptor
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
* @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
* @link_tbl_bytes: length of dma mapped link_tbl space
* @link_tbl_dma: bus physical mapped address of h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
*/
-struct ipsec_esp_edesc {
+struct aead_edesc {
int assoc_nents;
int src_nents;
int dst_nents;
+ dma_addr_t iv_dma;
int link_tbl_bytes;
dma_addr_t link_tbl_dma;
struct link_tbl_entry *link_tbl;
u32 hw_desc[0];
};
-static void ipsec_esp_unmap(struct device *dev,
- struct ipsec_esp_edesc *edesc,
- struct aead_request *areq)
-{
- dma_unmap_sg(dev, areq->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ * @link_tbl_bytes: length of dma mapped link_tbl space
+ * @link_tbl_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ */
+struct ablkcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int link_tbl_bytes;
+ dma_addr_t link_tbl_dma;
+ struct link_tbl_entry *link_tbl;
+ u32 hw_desc[0];
+};
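Both extended-descriptor structs end in a zero-length hw_desc[] member so
that one kmalloc() holds the bookkeeping fields, the job descriptor, and
the link table back to back. A self-contained userspace sketch of the same
layout arithmetic (sizes are arbitrary; the flexible array is spelled in
C99 form):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct edesc_model {
		int link_tbl_bytes;
		void *link_tbl;
		uint32_t hw_desc[];	/* C99 flexible array member */
	};

	int main(void)
	{
		int desc_bytes = 16 * sizeof(uint32_t);	/* example size */
		int link_tbl_bytes = 4 * 16;	/* 4 x 16-byte entries */
		struct edesc_model *e;

		e = malloc(sizeof(*e) + desc_bytes + link_tbl_bytes);
		if (!e)
			return 1;
		/* same pointer math as the *_edesc_alloc() routines below */
		e->link_tbl = (char *)e + sizeof(*e) + desc_bytes;
		printf("hw_desc at %p, link_tbl at %p\n",
		       (void *)e->hw_desc, e->link_tbl);
		free(e);
		return 0;
	}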
- if (unlikely(areq->dst != areq->src)) {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, areq->dst, edesc->dst_nents,
- DMA_FROM_DEVICE);
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents, int dst_nents,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t link_tbl_dma,
+ int link_tbl_bytes)
+{
+ if (unlikely(dst != src)) {
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
} else {
- dma_unmap_sg(dev, areq->src, edesc->src_nents,
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
- if (edesc->link_tbl_bytes)
- dma_unmap_single(dev, edesc->link_tbl_dma,
- edesc->link_tbl_bytes,
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+ if (link_tbl_bytes)
+ dma_unmap_single(dev, link_tbl_dma, link_tbl_bytes,
DMA_TO_DEVICE);
}
-/*
- * ipsec_esp descriptor callbacks
- */
-static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_unmap(struct device *dev,
+ struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+ edesc->link_tbl_bytes);
+}
+
+static void ablkcipher_unmap(struct device *dev,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ caam_unmap(dev, req->src, req->dst,
+ edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->link_tbl_dma,
+ edesc->link_tbl_bytes);
+}
+
+static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
- int ivsize = crypto_aead_ivsize(aead);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -444,39 +876,50 @@ static void ipsec_esp_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src) - ivsize,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen +
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen +
ctx->authsize + 4, 1);
#endif
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
}
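The job ring hands each completion callback only the bare hw_desc pointer;
the callback recovers the enclosing edesc with offsetof(), exactly like
container_of(). A standalone illustration:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct edesc_model {
		int src_nents;
		uint32_t hw_desc[4];
	};

	int main(void)
	{
		struct edesc_model e = { .src_nents = 3 };
		uint32_t *desc = e.hw_desc;	/* what the callback gets */

		/* same recovery as aead_encrypt_done() above */
		struct edesc_model *back = (struct edesc_model *)
			((char *)desc - offsetof(struct edesc_model, hw_desc));
		printf("src_nents = %d\n", back->src_nents);	/* prints 3 */
		return 0;
	}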
-static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
void *context)
{
- struct aead_request *areq = context;
- struct ipsec_esp_edesc *edesc;
+ struct aead_request *req = context;
+ struct aead_edesc *edesc;
#ifdef DEBUG
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = (struct ipsec_esp_edesc *)((char *)desc -
- offsetof(struct ipsec_esp_edesc, hw_desc));
+
+ edesc = (struct aead_edesc *)((char *)desc -
+ offsetof(struct aead_edesc, hw_desc));
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->dst),
+ req->cryptlen, 1);
+#endif
if (err) {
char tmp[CAAM_ERROR_STR_MAX];
@@ -484,7 +927,7 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
}
- ipsec_esp_unmap(jrdev, edesc, areq);
+ aead_unmap(jrdev, edesc, req);
/*
* verify hw auth check passed else return -EBADMSG
@@ -495,255 +938,413 @@ static void ipsec_esp_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
#ifdef DEBUG
print_hex_dump(KERN_ERR, "iphdrout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4,
- ((char *)sg_virt(areq->assoc) - sizeof(struct iphdr)),
- sizeof(struct iphdr) + areq->assoclen +
- ((areq->cryptlen > 1500) ? 1500 : areq->cryptlen) +
+ ((char *)sg_virt(req->assoc) - sizeof(struct iphdr)),
+ sizeof(struct iphdr) + req->assoclen +
+ ((req->cryptlen > 1500) ? 1500 : req->cryptlen) +
ctx->authsize + 36, 1);
if (!err && edesc->link_tbl_bytes) {
- struct scatterlist *sg = sg_last(areq->src, edesc->src_nents);
+ struct scatterlist *sg = sg_last(req->src, edesc->src_nents);
print_hex_dump(KERN_ERR, "sglastout@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(sg),
sg->length + ctx->authsize + 16, 1);
}
#endif
+
kfree(edesc);
- aead_request_complete(areq, err);
+ aead_request_complete(req, err);
+}
+
+static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
+{
+ struct ablkcipher_request *req = context;
+ struct ablkcipher_edesc *edesc;
+#ifdef DEBUG
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+ dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+#endif
+
+ edesc = (struct ablkcipher_edesc *)((char *)desc -
+ offsetof(struct ablkcipher_edesc, hw_desc));
+ if (err) {
+ char tmp[CAAM_ERROR_STR_MAX];
+
+ dev_err(jrdev, "%08x: %s\n", err, caam_jr_strstatus(tmp, err));
+ }
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dstiv @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "dst @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+#endif
+
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+
+ ablkcipher_request_complete(req, err);
+}
+
+static void sg_to_link_tbl_one(struct link_tbl_entry *link_tbl_ptr,
+ dma_addr_t dma, u32 len, u32 offset)
+{
+ link_tbl_ptr->ptr = dma;
+ link_tbl_ptr->len = len;
+ link_tbl_ptr->reserved = 0;
+ link_tbl_ptr->buf_pool_id = 0;
+ link_tbl_ptr->offset = offset;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "link_tbl_ptr@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, link_tbl_ptr,
+ sizeof(struct link_tbl_entry), 1);
+#endif
}
/*
* convert scatterlist to h/w link table format
- * scatterlist must have been previously dma mapped
+ * without setting the final bit; returns a pointer to the last entry
*/
-static void sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- struct link_tbl_entry *link_tbl_ptr, u32 offset)
+static struct link_tbl_entry *sg_to_link_tbl(struct scatterlist *sg,
+ int sg_count, struct link_tbl_entry
+ *link_tbl_ptr, u32 offset)
{
while (sg_count) {
- link_tbl_ptr->ptr = sg_dma_address(sg);
- link_tbl_ptr->len = sg_dma_len(sg);
- link_tbl_ptr->reserved = 0;
- link_tbl_ptr->buf_pool_id = 0;
- link_tbl_ptr->offset = offset;
+ sg_to_link_tbl_one(link_tbl_ptr, sg_dma_address(sg),
+ sg_dma_len(sg), offset);
link_tbl_ptr++;
sg = sg_next(sg);
sg_count--;
}
+ return link_tbl_ptr - 1;
+}
- /* set Final bit (marks end of link table) */
- link_tbl_ptr--;
+/*
+ * convert scatterlist to h/w link table format
+ * scatterlist must have been previously dma mapped
+ */
+static void sg_to_link_tbl_last(struct scatterlist *sg, int sg_count,
+ struct link_tbl_entry *link_tbl_ptr, u32 offset)
+{
+ link_tbl_ptr = sg_to_link_tbl(sg, sg_count, link_tbl_ptr, offset);
link_tbl_ptr->len |= 0x40000000;
}
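sg_to_link_tbl_last() differs from sg_to_link_tbl() only in ORing bit 30
into the length word of the final entry, which is how the CAAM hardware
recognizes the end of a link table. A trivial standalone model:

	#include <stdio.h>
	#include <stdint.h>

	struct entry { uint64_t ptr; uint32_t len; };

	int main(void)
	{
		struct entry tbl[3] = {
			{ 0x1000, 512 }, { 0x2000, 512 }, { 0x3000, 256 },
		};
		/* mark the final entry, as sg_to_link_tbl_last() does */
		tbl[2].len |= 0x40000000;
		printf("last len word: 0x%08x\n", tbl[2].len);
		return 0;
	}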
/*
- * fill in and submit ipsec_esp job descriptor
+ * Fill in aead job descriptor
*/
-static int ipsec_esp(struct ipsec_esp_edesc *edesc, struct aead_request *areq,
- u32 encrypt,
- void (*callback) (struct device *dev, u32 *desc,
- u32 err, void *context))
+static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ bool all_contig, bool encrypt)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
- struct device *jrdev = ctx->jrdev;
- u32 *desc = edesc->hw_desc, options;
- int ret, sg_count, assoc_sg_count;
int ivsize = crypto_aead_ivsize(aead);
int authsize = ctx->authsize;
- dma_addr_t ptr, dst_dma, src_dma;
-#ifdef DEBUG
- u32 *sh_desc = ctx->sh_desc;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
+#ifdef DEBUG
debug("assoclen %d cryptlen %d authsize %d\n",
- areq->assoclen, areq->cryptlen, authsize);
+ req->assoclen, req->cryptlen, authsize);
print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->assoc),
- areq->assoclen , 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src) - ivsize,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents ? 100 : ivsize, 1);
print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(areq->src),
- edesc->src_nents ? 100 : areq->cryptlen + authsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->cryptlen, 1);
print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
desc_bytes(sh_desc), 1);
#endif
- assoc_sg_count = dma_map_sg(jrdev, areq->assoc, edesc->assoc_nents ?: 1,
- DMA_TO_DEVICE);
- if (areq->src == areq->dst)
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_BIDIRECTIONAL);
- else
- sg_count = dma_map_sg(jrdev, areq->src, edesc->src_nents ? : 1,
- DMA_TO_DEVICE);
- /* start auth operation */
- append_operation(desc, ctx->class2_alg_type | OP_ALG_AS_INITFINAL |
- (encrypt ? : OP_ALG_ICV_ON));
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* Load FIFO with data for Class 2 CHA */
- options = FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG;
- if (!edesc->assoc_nents) {
- ptr = sg_dma_address(areq->assoc);
+ if (all_contig) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->assoc, edesc->assoc_nents,
- edesc->link_tbl, 0);
- ptr = edesc->link_tbl_dma;
- options |= LDST_SGF;
+ src_dma = edesc->link_tbl_dma;
+ link_tbl_index += (edesc->assoc_nents ? : 1) + 1 +
+ (edesc->src_nents ? : 1);
+ in_options = LDST_SGF;
}
- append_fifo_load(desc, ptr, areq->assoclen, options);
-
- /* copy iv from cipher/class1 input context to class2 infifo */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
-
- if (!encrypt) {
- u32 *jump_cmd, *uncond_jump_cmd;
-
- /* JUMP if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
+ if (encrypt)
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+ else
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen, in_options);
- /* start class 1 (cipher) operation, non-shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL);
+ if (likely(req->src == req->dst)) {
+ if (all_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ ((edesc->assoc_nents ? : 1) + 1);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index *
+ sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ if (encrypt)
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
+ else
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize,
+ out_options);
+}
- uncond_jump_cmd = append_jump(desc, 0);
+/*
+ * Fill in aead givencrypt job descriptor
+ */
+static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
+ struct aead_edesc *edesc,
+ struct aead_request *req,
+ int contig)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ivsize = crypto_aead_ivsize(aead);
+ int authsize = ctx->authsize;
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
- set_jump_tgt_here(desc, jump_cmd);
+#ifdef DEBUG
+ debug("assoclen %d cryptlen %d authsize %d\n",
+ req->assoclen, req->cryptlen, authsize);
+ print_hex_dump(KERN_ERR, "assoc @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->assoc),
+ req->assoclen, 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+ print_hex_dump(KERN_ERR, "shrdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sh_desc,
+ desc_bytes(sh_desc), 1);
+#endif
- /* start class 1 (cipher) operation, shared version */
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | OP_ALG_AAI_DK);
- set_jump_tgt_here(desc, uncond_jump_cmd);
- } else
- append_operation(desc, ctx->class1_alg_type |
- OP_ALG_AS_INITFINAL | encrypt);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- /* load payload & instruct to class2 to snoop class 1 if encrypting */
- options = 0;
- if (!edesc->src_nents) {
- src_dma = sg_dma_address(areq->src);
+ if (contig & GIV_SRC_CONTIG) {
+ src_dma = sg_dma_address(req->assoc);
+ in_options = 0;
} else {
- sg_to_link_tbl(areq->src, edesc->src_nents, edesc->link_tbl +
- edesc->assoc_nents, 0);
- src_dma = edesc->link_tbl_dma + edesc->assoc_nents *
- sizeof(struct link_tbl_entry);
- options |= LDST_SGF;
+ src_dma = edesc->link_tbl_dma;
+ link_tbl_index += edesc->assoc_nents + 1 + edesc->src_nents;
+ in_options = LDST_SGF;
}
- append_seq_in_ptr(desc, src_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_load(desc, areq->cryptlen, FIFOLD_CLASS_BOTH |
- FIFOLD_TYPE_LASTBOTH |
- (encrypt ? FIFOLD_TYPE_MSG1OUT2
- : FIFOLD_TYPE_MSG));
-
- /* specify destination */
- if (areq->src == areq->dst) {
- dst_dma = src_dma;
+ append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize +
+ req->cryptlen - authsize, in_options);
+
+ if (contig & GIV_DST_CONTIG) {
+ dst_dma = edesc->iv_dma;
} else {
- sg_count = dma_map_sg(jrdev, areq->dst, edesc->dst_nents ? : 1,
- DMA_FROM_DEVICE);
- if (!edesc->dst_nents) {
- dst_dma = sg_dma_address(areq->dst);
- options = 0;
+ if (likely(req->src == req->dst)) {
+ dst_dma = src_dma + sizeof(struct link_tbl_entry) *
+ edesc->assoc_nents;
+ out_options = LDST_SGF;
} else {
- sg_to_link_tbl(areq->dst, edesc->dst_nents,
- edesc->link_tbl + edesc->assoc_nents +
- edesc->src_nents, 0);
- dst_dma = edesc->link_tbl_dma + (edesc->assoc_nents +
- edesc->src_nents) *
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index *
sizeof(struct link_tbl_entry);
- options = LDST_SGF;
+ out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, areq->cryptlen + authsize, options);
- append_seq_fifo_store(desc, areq->cryptlen, FIFOST_TYPE_MESSAGE_DATA);
- /* ICV */
- if (encrypt)
- append_seq_store(desc, authsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
- else
- append_seq_fifo_load(desc, authsize, FIFOLD_CLASS_CLASS2 |
- FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+ append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen, out_options);
+}
+
+/*
+ * Fill in ablkcipher job descriptor
+ */
+static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
+ struct ablkcipher_edesc *edesc,
+ struct ablkcipher_request *req,
+ bool iv_contig)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ u32 *desc = edesc->hw_desc;
+ u32 out_options = 0, in_options;
+ dma_addr_t dst_dma, src_dma;
+ int len, link_tbl_index = 0;
#ifdef DEBUG
- debug("job_desc_len %d\n", desc_len(desc));
- print_hex_dump(KERN_ERR, "jobdesc@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc) , 1);
- print_hex_dump(KERN_ERR, "jdlinkt@"xstr(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
- edesc->link_tbl_bytes, 1);
+ print_hex_dump(KERN_ERR, "presciv@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ ivsize, 1);
+ print_hex_dump(KERN_ERR, "src @"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ edesc->src_nents ? 100 : req->nbytes, 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, callback, areq);
- if (!ret)
- ret = -EINPROGRESS;
- else {
- ipsec_esp_unmap(jrdev, edesc, areq);
- kfree(edesc);
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+ if (iv_contig) {
+ src_dma = edesc->iv_dma;
+ in_options = 0;
+ } else {
+ src_dma = edesc->link_tbl_dma;
+ link_tbl_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ in_options = LDST_SGF;
}
+ append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
- return ret;
+ if (likely(req->src == req->dst)) {
+ if (!edesc->src_nents && iv_contig) {
+ dst_dma = sg_dma_address(req->src);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ } else {
+ if (!edesc->dst_nents) {
+ dst_dma = sg_dma_address(req->dst);
+ } else {
+ dst_dma = edesc->link_tbl_dma +
+ link_tbl_index * sizeof(struct link_tbl_entry);
+ out_options = LDST_SGF;
+ }
+ }
+ append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
}
/*
* derive number of elements in scatterlist
*/
-static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
+static int sg_count(struct scatterlist *sg_list, int nbytes)
{
struct scatterlist *sg = sg_list;
int sg_nents = 0;
- *chained = 0;
while (nbytes > 0) {
sg_nents++;
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
- *chained = 1;
+ BUG(); /* chained scatterlists are not supported */
sg = scatterwalk_sg_next(sg);
}
+ if (likely(sg_nents == 1))
+ return 0;
+
return sg_nents;
}
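Returning 0 for a single segment lets callers treat the result as "needs a
link table" in boolean context and fall back to `nents ? : 1` when mapping.
A userspace model of that convention, with an array of segment lengths
standing in for the scatterlist:

	#include <stdio.h>

	/* model: lens[] plays the role of the scatterlist segments */
	static int sg_count_model(const int *lens, int nsegs, int nbytes)
	{
		int i = 0, sg_nents = 0;

		while (nbytes > 0 && i < nsegs) {
			sg_nents++;
			nbytes -= lens[i++];
		}
		return sg_nents == 1 ? 0 : sg_nents;	/* 0 == contiguous */
	}

	int main(void)
	{
		int one[] = { 1024 }, three[] = { 512, 512, 512 };

		printf("%d %d\n", sg_count_model(one, 1, 1024),
		       sg_count_model(three, 3, 1500));	/* prints: 0 3 */
		return 0;
	}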
/*
- * allocate and map the ipsec_esp extended descriptor
+ * allocate and map the aead extended descriptor
*/
-static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
- int desc_bytes)
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ int desc_bytes, bool *all_contig_ptr)
{
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- gfp_t flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- int assoc_nents, src_nents, dst_nents = 0, chained, link_tbl_bytes;
- struct ipsec_esp_edesc *edesc;
-
- assoc_nents = sg_count(areq->assoc, areq->assoclen, &chained);
- BUG_ON(chained);
- if (likely(assoc_nents == 1))
- assoc_nents = 0;
-
- src_nents = sg_count(areq->src, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (src_nents == 1)
- src_nents = 0;
-
- if (unlikely(areq->dst != areq->src)) {
- dst_nents = sg_count(areq->dst, areq->cryptlen + ctx->authsize,
- &chained);
- BUG_ON(chained);
- if (dst_nents == 1)
- dst_nents = 0;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ bool all_contig = true;
+ int ivsize = crypto_aead_ivsize(aead);
+ int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen);
+ src_nents = sg_count(req->src, req->cryptlen);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen);
+
+ sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
}
- link_tbl_bytes = (assoc_nents + src_nents + dst_nents) *
- sizeof(struct link_tbl_entry);
- debug("link_tbl_bytes %d\n", link_tbl_bytes);
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize !=
+ sg_dma_address(req->src)) {
+ all_contig = false;
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ link_tbl_len = assoc_nents + 1 + src_nents;
+ }
+ link_tbl_len += dst_nents;
+
+ link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kmalloc(sizeof(struct ipsec_esp_edesc) + desc_bytes +
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
link_tbl_bytes, GFP_DMA | flags);
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -753,142 +1354,450 @@ static struct ipsec_esp_edesc *ipsec_esp_edesc_alloc(struct aead_request *areq,
edesc->assoc_nents = assoc_nents;
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
- edesc->link_tbl = (void *)edesc + sizeof(struct ipsec_esp_edesc) +
+ edesc->iv_dma = iv_dma;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
desc_bytes;
edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
link_tbl_bytes, DMA_TO_DEVICE);
- edesc->link_tbl_bytes = link_tbl_bytes;
+ *all_contig_ptr = all_contig;
+
+ link_tbl_index = 0;
+ if (!all_contig) {
+ sg_to_link_tbl(req->assoc,
+ (assoc_nents ? : 1),
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += assoc_nents ? : 1;
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->src,
+ (src_nents ? : 1),
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += src_nents ? : 1;
+ }
+ if (dst_nents) {
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
return edesc;
}
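The all_contig test above asks whether the DMA view is literally
assoc | IV | payload with no gaps: the associated data must end exactly
where the mapped IV starts, and the IV must end exactly where the source
payload starts, each in a single segment. With made-up bus addresses:

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	int main(void)
	{
		/* hypothetical single-segment bus addresses */
		uint64_t assoc = 0x80000000, iv, src;
		int assoclen = 24, ivsize = 16;

		iv  = assoc + assoclen;		/* 0x80000018 */
		src = iv + ivsize;		/* 0x80000028 */

		bool all_contig = (assoc + assoclen == iv) &&
				  (iv + ivsize == src);
		printf("all_contig = %d\n", all_contig); /* 1: no link table */
		return 0;
	}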
-static int aead_authenc_encrypt(struct aead_request *areq)
+static int aead_encrypt(struct aead_request *req)
{
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
+ int ret = 0;
+
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_ENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
- desc = edesc->hw_desc;
-
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
-
- iv_dma = dma_map_single(jrdev, areq->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req,
+ all_contig, true);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
-static int aead_authenc_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
{
+ struct aead_edesc *edesc;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
- int ivsize = crypto_aead_ivsize(aead);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- struct ipsec_esp_edesc *edesc;
+ bool all_contig;
u32 *desc;
- dma_addr_t iv_dma;
-
- req->cryptlen -= ctx->authsize;
+ int ret = 0;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(req, DESC_AEAD_DECRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &all_contig);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "dec src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+ /* Create and submit job descriptor */
+ init_aead_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, all_contig, false);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
- /* check dma error */
+/*
+ * allocate and map the aead extended descriptor for aead givencrypt
+ */
+static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
+ *greq, int desc_bytes,
+ u32 *contig_ptr)
+{
+ struct aead_request *req = &greq->areq;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC;
+ int assoc_nents, src_nents, dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ int sgc;
+ u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
+ int ivsize = crypto_aead_ivsize(aead);
+ int link_tbl_index, link_tbl_len = 0, link_tbl_bytes;
+
+ assoc_nents = sg_count(req->assoc, req->assoclen);
+ src_nents = sg_count(req->src, req->cryptlen);
- append_load(desc, iv_dma, ivsize,
- LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT);
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->cryptlen);
- return ipsec_esp(edesc, req, !OP_ALG_ENCRYPT, ipsec_esp_decrypt_done);
+ sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
+ }
+
+ /* Check if data are contiguous */
+ iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+ if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
+ iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+ contig &= ~GIV_SRC_CONTIG;
+ if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ if (unlikely(req->src != req->dst)) {
+ dst_nents = dst_nents ? : 1;
+ link_tbl_len += 1;
+ }
+ if (!(contig & GIV_SRC_CONTIG)) {
+ assoc_nents = assoc_nents ? : 1;
+ src_nents = src_nents ? : 1;
+ link_tbl_len += assoc_nents + 1 + src_nents;
+ if (likely(req->src == req->dst))
+ contig &= ~GIV_DST_CONTIG;
+ }
+ link_tbl_len += dst_nents;
+
+ link_tbl_bytes = link_tbl_len * sizeof(struct link_tbl_entry);
+
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct aead_edesc) + desc_bytes +
+ link_tbl_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->assoc_nents = assoc_nents;
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct aead_edesc) +
+ desc_bytes;
+ edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+ link_tbl_bytes, DMA_TO_DEVICE);
+ *contig_ptr = contig;
+
+ link_tbl_index = 0;
+ if (!(contig & GIV_SRC_CONTIG)) {
+ sg_to_link_tbl(req->assoc, assoc_nents,
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += assoc_nents;
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->src, src_nents,
+ edesc->link_tbl +
+ link_tbl_index, 0);
+ link_tbl_index += src_nents;
+ }
+ if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
+ sg_to_link_tbl_one(edesc->link_tbl + link_tbl_index,
+ iv_dma, ivsize, 0);
+ link_tbl_index += 1;
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
+
+ return edesc;
}
-static int aead_authenc_givencrypt(struct aead_givcrypt_request *req)
+static int aead_givencrypt(struct aead_givcrypt_request *areq)
{
- struct aead_request *areq = &req->areq;
- struct ipsec_esp_edesc *edesc;
- struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+ struct aead_request *req = &areq->areq;
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
- int ivsize = crypto_aead_ivsize(aead);
- dma_addr_t iv_dma;
+ u32 contig;
u32 *desc;
+ int ret = 0;
- iv_dma = dma_map_single(jrdev, req->giv, ivsize, DMA_FROM_DEVICE);
-
- debug("%s: giv %p\n", __func__, req->giv);
+ req->cryptlen += ctx->authsize;
/* allocate extended descriptor */
- edesc = ipsec_esp_edesc_alloc(areq, DESC_AEAD_GIVENCRYPT_TEXT_LEN *
- CAAM_CMD_SZ);
+ edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &contig);
+
if (IS_ERR(edesc))
return PTR_ERR(edesc);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "giv src@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+ req->cryptlen, 1);
+#endif
+
+ /* Create and submit job descriptor */
+ init_aead_giv_job(ctx->sh_desc_givenc,
+ ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "aead jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ aead_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- /* insert shared descriptor pointer */
- init_job_desc_shared(desc, ctx->shared_desc_phys,
- desc_len(ctx->sh_desc), HDR_SHARE_DEFER);
+ return ret;
+}
- /*
- * LOAD IMM Info FIFO
- * to DECO, Last, Padding, Random, Message, 16 bytes
- */
- append_load_imm_u32(desc, NFIFOENTRY_DEST_DECO | NFIFOENTRY_LC1 |
- NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DTYPE_MSG |
- NFIFOENTRY_PTYPE_RND | ivsize,
- LDST_SRCDST_WORD_INFO_FIFO);
+/*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ *req, int desc_bytes,
+ bool *iv_contig_out)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+ CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, dst_nents = 0, link_tbl_bytes;
+ struct ablkcipher_edesc *edesc;
+ dma_addr_t iv_dma = 0;
+ bool iv_contig = false;
+ int sgc;
+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int link_tbl_index;
+
+ src_nents = sg_count(req->src, req->nbytes);
+
+ if (unlikely(req->dst != req->src))
+ dst_nents = sg_count(req->dst, req->nbytes);
+
+ if (likely(req->src == req->dst)) {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_BIDIRECTIONAL);
+ } else {
+ sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
+ DMA_TO_DEVICE);
+ sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
+ DMA_FROM_DEVICE);
+ }
/*
- * disable info fifo entries since the above serves as the entry
- * this way, the MOVE command won't generate an entry.
- * Note that this isn't required in more recent versions of
- * SEC as a MOVE that doesn't do info FIFO entries is available.
+ * Check if iv can be contiguous with source and destination.
+ * If so, include it; if not, give the IV its own link table entry.
*/
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+ if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+ iv_contig = true;
+ else
+ src_nents = src_nents ? : 1;
+ link_tbl_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+ sizeof(struct link_tbl_entry);
- /* MOVE DECO Alignment -> C1 Context 16 bytes */
- append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | ivsize);
+ /* allocate space for base edesc and hw desc commands, link tables */
+ edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+ link_tbl_bytes, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(jrdev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
- /* re-enable info fifo entries */
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->link_tbl_bytes = link_tbl_bytes;
+ edesc->link_tbl = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+ desc_bytes;
+
+ link_tbl_index = 0;
+ if (!iv_contig) {
+ sg_to_link_tbl_one(edesc->link_tbl, iv_dma, ivsize, 0);
+ sg_to_link_tbl_last(req->src, src_nents,
+ edesc->link_tbl + 1, 0);
+ link_tbl_index += 1 + src_nents;
+ }
+
+ if (unlikely(dst_nents)) {
+ sg_to_link_tbl_last(req->dst, dst_nents,
+ edesc->link_tbl + link_tbl_index, 0);
+ }
+
+ edesc->link_tbl_dma = dma_map_single(jrdev, edesc->link_tbl,
+ link_tbl_bytes, DMA_TO_DEVICE);
+ edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher link_tbl@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->link_tbl,
+ link_tbl_bytes, 1);
+#endif
+
+ *iv_contig_out = iv_contig;
+ return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
- /* MOVE C1 Context -> OFIFO 16 bytes */
- append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | ivsize);
+ /* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_enc,
+ ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+ desc = edesc->hw_desc;
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
- append_fifo_store(desc, iv_dma, ivsize, FIFOST_TYPE_MESSAGE_DATA);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
- return ipsec_esp(edesc, areq, OP_ALG_ENCRYPT, ipsec_esp_encrypt_done);
+ return ret;
}
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct ablkcipher_edesc *edesc;
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+ bool iv_contig;
+ u32 *desc;
+ int ret = 0;
+
+ /* allocate extended descriptor */
+ edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+ CAAM_CMD_SZ, &iv_contig);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /* Create and submit job descriptor */
+ init_ablkcipher_job(ctx->sh_desc_dec,
+ ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+ desc = edesc->hw_desc;
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"xstr(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+ desc_bytes(edesc->hw_desc), 1);
+#endif
+
+ ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ if (!ret) {
+ ret = -EINPROGRESS;
+ } else {
+ ablkcipher_unmap(jrdev, edesc, req);
+ kfree(edesc);
+ }
+
+ return ret;
+}
+
+#define template_aead template_u.aead
+#define template_ablkcipher template_u.ablkcipher
struct caam_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
char driver_name[CRYPTO_MAX_ALG_NAME];
unsigned int blocksize;
- struct aead_alg aead;
+ u32 type;
+ union {
+ struct ablkcipher_alg ablkcipher;
+ struct aead_alg aead;
+ struct blkcipher_alg blkcipher;
+ struct cipher_alg cipher;
+ struct compress_alg compress;
+ struct rng_alg rng;
+ } template_u;
u32 class1_alg_type;
u32 class2_alg_type;
u32 alg_op;
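The template_aead/template_ablkcipher defines above are plain shorthand for
designated initializers into the union, so each template fills in exactly
one algorithm-type member. A standalone demonstration of the trick (struct
and field names here are invented for illustration):

	#include <stdio.h>

	struct alg_template {
		int blocksize;
		union {
			struct { int ivsize; } aead;
			struct { int min_keysize; } ablkcipher;
		} u;
	};

	#define template_aead u.aead	/* same shorthand as the driver's */

	int main(void)
	{
		struct alg_template t = {
			.blocksize = 16,
			.template_aead = { .ivsize = 16 }, /* -> .u.aead */
		};
		printf("ivsize %d\n", t.template_aead.ivsize);
		return 0;
	}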
@@ -900,12 +1809,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -918,12 +1828,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(aes))",
.driver_name = "authenc-hmac-sha256-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -937,12 +1848,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(aes))",
.driver_name = "authenc-hmac-sha512-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -956,12 +1868,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -974,12 +1887,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha256-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -993,12 +1907,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha512-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES3_EDE_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1012,12 +1927,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
@@ -1030,12 +1946,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha256),cbc(des))",
.driver_name = "authenc-hmac-sha256-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
@@ -1049,12 +1966,13 @@ static struct caam_alg_template driver_algs[] = {
.name = "authenc(hmac(sha512),cbc(des))",
.driver_name = "authenc-hmac-sha512-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
- .aead = {
- .setkey = aead_authenc_setkey,
- .setauthsize = aead_authenc_setauthsize,
- .encrypt = aead_authenc_encrypt,
- .decrypt = aead_authenc_decrypt,
- .givencrypt = aead_authenc_givencrypt,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
.geniv = "<built-in>",
.ivsize = DES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
@@ -1064,6 +1982,55 @@ static struct caam_alg_template driver_algs[] = {
OP_ALG_AAI_HMAC_PRECOMP,
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
+ /* ablkcipher descriptor */
+ {
+ .name = "cbc(aes)",
+ .driver_name = "cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des3_ede)",
+ .driver_name = "cbc-3des-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .name = "cbc(des)",
+ .driver_name = "cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ .template_ablkcipher = {
+ .setkey = ablkcipher_setkey,
+ .encrypt = ablkcipher_encrypt,
+ .decrypt = ablkcipher_decrypt,
+ .geniv = "eseqiv",
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ }
};
struct caam_crypto_alg {
@@ -1102,16 +2069,19 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
{
struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
- if (!dma_mapping_error(ctx->jrdev, ctx->shared_desc_phys))
- dma_unmap_single(ctx->jrdev, ctx->shared_desc_phys,
- desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
- kfree(ctx->sh_desc);
-
- if (!dma_mapping_error(ctx->jrdev, ctx->key_phys))
- dma_unmap_single(ctx->jrdev, ctx->key_phys,
- ctx->split_key_pad_len + ctx->enckeylen,
+ if (ctx->sh_desc_enc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_enc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_enc_dma,
+ desc_bytes(ctx->sh_desc_enc), DMA_TO_DEVICE);
+ if (ctx->sh_desc_dec_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_dec_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_dec_dma,
+ desc_bytes(ctx->sh_desc_dec), DMA_TO_DEVICE);
+ if (ctx->sh_desc_givenc_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->sh_desc_givenc_dma))
+ dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
+ desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
- kfree(ctx->key);
}
static void __exit caam_algapi_exit(void)
@@ -1175,12 +2145,20 @@ static struct caam_crypto_alg *caam_alg_alloc(struct device *ctrldev,
alg->cra_init = caam_cra_init;
alg->cra_exit = caam_cra_exit;
alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_type = &crypto_aead_type;
alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_u.aead = template->aead;
+ alg->cra_flags = CRYPTO_ALG_ASYNC | template->type;
+ switch (template->type) {
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+ alg->cra_type = &crypto_ablkcipher_type;
+ alg->cra_ablkcipher = template->template_ablkcipher;
+ break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ alg->cra_type = &crypto_aead_type;
+ alg->cra_aead = template->template_aead;
+ break;
+ }
t_alg->class1_alg_type = template->class1_alg_type;
t_alg->class2_alg_type = template->class2_alg_type;
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 9504503..d38f2af 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -31,5 +31,6 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
+#include <crypto/internal/skcipher.h>
#endif /* !defined(CAAM_COMPAT_H) */
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 9009713..73988bb 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -52,9 +52,11 @@ static int caam_probe(struct platform_device *pdev)
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
- struct caam_perfmon *perfmon;
struct caam_deco **deco;
u32 deconum;
+#ifdef CONFIG_DEBUG_FS
+ struct caam_perfmon *perfmon;
+#endif
ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
if (!ctrlpriv)
@@ -164,52 +166,52 @@ static int caam_probe(struct platform_device *pdev)
/* Controller-level - performance monitor counters */
ctrlpriv->ctl_rq_dequeued =
debugfs_create_u64("rq_dequeued",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->req_dequeued);
ctrlpriv->ctl_ob_enc_req =
debugfs_create_u64("ob_rq_encrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_req);
ctrlpriv->ctl_ib_dec_req =
debugfs_create_u64("ib_rq_decrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_req);
ctrlpriv->ctl_ob_enc_bytes =
debugfs_create_u64("ob_bytes_encrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_enc_bytes);
ctrlpriv->ctl_ob_prot_bytes =
debugfs_create_u64("ob_bytes_protected",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ob_prot_bytes);
ctrlpriv->ctl_ib_dec_bytes =
debugfs_create_u64("ib_bytes_decrypted",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_dec_bytes);
ctrlpriv->ctl_ib_valid_bytes =
debugfs_create_u64("ib_bytes_validated",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->ib_valid_bytes);
/* Controller level - global status values */
ctrlpriv->ctl_faultaddr =
debugfs_create_u64("fault_addr",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultaddr);
ctrlpriv->ctl_faultdetail =
debugfs_create_u32("fault_detail",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->faultdetail);
ctrlpriv->ctl_faultstatus =
debugfs_create_u32("fault_status",
- S_IFCHR | S_IRUSR | S_IRGRP | S_IROTH,
+ S_IRUSR | S_IRGRP | S_IROTH,
ctrlpriv->ctl, &perfmon->status);
/* Internal covering keys (useful in non-secure mode only) */
ctrlpriv->ctl_kek_wrap.data = &ctrlpriv->ctrl->kek[0];
ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_kek = debugfs_create_blob("kek",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_kek_wrap);
@@ -217,7 +219,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl_tkek_wrap.data = &ctrlpriv->ctrl->tkek[0];
ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tkek = debugfs_create_blob("tkek",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tkek_wrap);
@@ -225,7 +227,7 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->ctl_tdsk_wrap.data = &ctrlpriv->ctrl->tdsk[0];
ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32);
ctrlpriv->ctl_tdsk = debugfs_create_blob("tdsk",
- S_IFCHR | S_IRUSR |
+ S_IRUSR |
S_IRGRP | S_IROTH,
ctrlpriv->ctl,
&ctrlpriv->ctl_tdsk_wrap);
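S_IFCHR is a file-type bit, not a permission bit; debugfs sets the file
type itself, so only read/write permission bits belong in the mode
argument. Each fixed call reduces to the shape below (0444 is the octal
spelling of S_IRUSR | S_IRGRP | S_IROTH):

	ctrlpriv->ctl_rq_dequeued =
		debugfs_create_u64("rq_dequeued", 0444,
				   ctrlpriv->ctl, &perfmon->req_dequeued);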
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 4691580..0991323 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -9,7 +9,7 @@
#define IMMEDIATE (1 << 23)
#define CAAM_CMD_SZ sizeof(u32)
#define CAAM_PTR_SZ sizeof(dma_addr_t)
-#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)
+#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * MAX_CAAM_DESCSIZE)
#ifdef DEBUG
#define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
@@ -18,6 +18,9 @@
#define PRINT_POS
#endif
+#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
@@ -203,3 +206,56 @@ static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
append_cmd(desc, immediate); \
}
APPEND_CMD_RAW_IMM(load, LOAD, u32);
+
+/*
+ * Append a math command. Only the suffix of each destination/source name
+ * needs to be specified (e.g. REG0 for MATH_DEST_REG0)
+ */
+#define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
+append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
+ MATH_SRC0_##src_0 | MATH_SRC1_##src_1 | (u32) (len & MATH_LEN_MASK))
+
+#define append_math_add(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADD, desc, dest, src0, src1, len)
+#define append_math_sub(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUB, desc, dest, src0, src1, len)
+#define append_math_add_c(desc, dest, src0, src1, len) \
+ APPEND_MATH(ADDC, desc, dest, src0, src1, len)
+#define append_math_sub_b(desc, dest, src0, src1, len) \
+ APPEND_MATH(SUBB, desc, dest, src0, src1, len)
+#define append_math_and(desc, dest, src0, src1, len) \
+ APPEND_MATH(AND, desc, dest, src0, src1, len)
+#define append_math_or(desc, dest, src0, src1, len) \
+ APPEND_MATH(OR, desc, dest, src0, src1, len)
+#define append_math_xor(desc, dest, src0, src1, len) \
+ APPEND_MATH(XOR, desc, dest, src0, src1, len)
+#define append_math_lshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
+#define append_math_rshift(desc, dest, src0, src1, len) \
+ APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
+
+/* Exactly one source is IMM; data is passed in as a u32 value */
+#define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
+do { \
+ APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
+ append_cmd(desc, data); \
+} while (0)
+
+#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
+#define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
+#define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
+#define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
+#define append_math_and_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
+#define append_math_or_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
+#define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
+#define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
+#define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
+ APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
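A hypothetical use of the new helpers; the operand names (VARSEQINLEN,
SEQINLEN, REG0, IMM) are the MATH_DEST_/MATH_SRC*_ suffixes defined in
desc.h and are assumed here, not introduced by this patch:

	/* VARSEQINLEN = SEQINLEN - authsize (one extra IMM word emitted) */
	append_math_sub_imm_u32(desc, VARSEQINLEN, SEQINLEN, IMM,
				ctx->authsize);
	/* REG0 = REG0 & 0xff */
	append_math_and_imm_u32(desc, REG0, REG0, IMM, 0xff);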
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 7e2d54b..9b8d231 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -16,9 +16,13 @@
char *tmp; \
\
tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
- sprintf(tmp, format, param); \
- strcat(str, tmp); \
- kfree(tmp); \
+ if (likely(tmp)) { \
+ sprintf(tmp, format, param); \
+ strcat(str, tmp); \
+ kfree(tmp); \
+ } else { \
+ strcat(str, "kmalloc failure in SPRINTFCAT"); \
+ } \
}
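The new branch matters because GFP_ATOMIC allocations fail under memory
pressure, and the old code would then sprintf() through a NULL pointer
while building an error string. A runnable userspace rendering of the fixed
macro (wrapped in do/while for hygiene; the kernel version uses
kmalloc/kfree instead):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define SPRINTFCAT(str, format, param, max_alloc)		\
	do {								\
		char *tmp = malloc(sizeof(format) + (max_alloc));	\
		if (tmp) {						\
			sprintf(tmp, format, param);			\
			strcat(str, tmp);				\
			free(tmp);					\
		} else {						\
			strcat(str, "malloc failure in SPRINTFCAT");	\
		}							\
	} while (0)

	int main(void)
	{
		char out[64] = "status: ";

		SPRINTFCAT(out, "0x%08x", 0xdeadbeefu, 16);
		puts(out);	/* status: 0xdeadbeef */
		return 0;
	}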
static void report_jump_idx(u32 status, char *outstr)
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index 605fd20..819dfda 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -28,6 +28,7 @@
#include <linux/device.h>
#include <linux/dca.h>
#include <linux/slab.h>
+#include <linux/module.h>
#define DCA_VERSION "1.12.1"
@@ -35,7 +36,7 @@ MODULE_VERSION(DCA_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");
-static DEFINE_SPINLOCK(dca_lock);
+static DEFINE_RAW_SPINLOCK(dca_lock);
static LIST_HEAD(dca_domains);
@@ -101,10 +102,10 @@ static void unregister_dca_providers(void)
INIT_LIST_HEAD(&unregistered_providers);
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (list_empty(&dca_domains)) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return;
}
@@ -116,7 +117,7 @@ static void unregister_dca_providers(void)
dca_free_domain(domain);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
dca_sysfs_remove_provider(dca);
@@ -144,13 +145,8 @@ static struct dca_domain *dca_get_domain(struct device *dev)
domain = dca_find_domain(rc);
if (!domain) {
- if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+ if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains))
dca_providers_blocked = 1;
- } else {
- domain = dca_allocate_domain(rc);
- if (domain)
- list_add(&domain->node, &dca_domains);
- }
}
return domain;
@@ -198,19 +194,19 @@ int dca_add_requester(struct device *dev)
if (!dev)
return -EFAULT;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
/* check if the requester has not been added already */
dca = dca_find_provider_by_dev(dev);
if (dca) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -EEXIST;
}
pci_rc = dca_pci_rc_from_dev(dev);
domain = dca_find_domain(pci_rc);
if (!domain) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
@@ -220,17 +216,17 @@ int dca_add_requester(struct device *dev)
break;
}
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
if (slot < 0)
return slot;
err = dca_sysfs_add_req(dca, dev, slot);
if (err) {
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (dca == dca_find_provider_by_dev(dev))
dca->ops->remove_requester(dca, dev);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return err;
}
@@ -251,14 +247,14 @@ int dca_remove_requester(struct device *dev)
if (!dev)
return -EFAULT;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
dca = dca_find_provider_by_dev(dev);
if (!dca) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
slot = dca->ops->remove_requester(dca, dev);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
if (slot < 0)
return slot;
@@ -280,16 +276,16 @@ u8 dca_common_get_tag(struct device *dev, int cpu)
u8 tag;
unsigned long flags;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
dca = dca_find_provider_by_dev(dev);
if (!dca) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
tag = dca->ops->get_tag(dca, dev, cpu);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return tag;
}
@@ -360,36 +356,51 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
{
int err;
unsigned long flags;
- struct dca_domain *domain;
+ struct dca_domain *domain, *newdomain = NULL;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (dca_providers_blocked) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return -ENODEV;
}
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
err = dca_sysfs_add_provider(dca, dev);
if (err)
return err;
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
domain = dca_get_domain(dev);
if (!domain) {
+ struct pci_bus *rc;
+
if (dca_providers_blocked) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
dca_sysfs_remove_provider(dca);
unregister_dca_providers();
- } else {
- spin_unlock_irqrestore(&dca_lock, flags);
+ return -ENODEV;
+ }
+
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
+ rc = dca_pci_rc_from_dev(dev);
+ newdomain = dca_allocate_domain(rc);
+ if (!newdomain)
+ return -ENODEV;
+ raw_spin_lock_irqsave(&dca_lock, flags);
+ /* Recheck, we might have raced after dropping the lock */
+ domain = dca_get_domain(dev);
+ if (!domain) {
+ domain = newdomain;
+ newdomain = NULL;
+ list_add(&domain->node, &dca_domains);
}
- return -ENODEV;
}
list_add(&dca->node, &domain->dca_providers);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_ADD, NULL);
+ kfree(newdomain);
return 0;
}
EXPORT_SYMBOL_GPL(register_dca_provider);
@@ -407,10 +418,10 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
blocking_notifier_call_chain(&dca_provider_chain,
DCA_PROVIDER_REMOVE, NULL);
- spin_lock_irqsave(&dca_lock, flags);
+ raw_spin_lock_irqsave(&dca_lock, flags);
if (list_empty(&dca_domains)) {
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
return;
}
@@ -421,7 +432,7 @@ void unregister_dca_provider(struct dca_provider *dca, struct device *dev)
if (list_empty(&domain->dca_providers))
dca_free_domain(domain);
- spin_unlock_irqrestore(&dca_lock, flags);
+ raw_spin_unlock_irqrestore(&dca_lock, flags);
dca_sysfs_remove_provider(dca);
}
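
The register_dca_provider() change follows the standard allocate-outside-the-lock pattern: drop the raw spinlock before the possibly sleeping allocation, retake it, recheck for a racing insert, and free the unused allocation. A distilled sketch of that pattern under assumed names (struct item, find_item and friends are illustrative, not the driver's API):

  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct item {
  	int key;
  	struct list_head node;
  };

  static LIST_HEAD(items);
  static DEFINE_RAW_SPINLOCK(items_lock);

  static struct item *find_item(int key)
  {
  	struct item *it;

  	list_for_each_entry(it, &items, node)
  		if (it->key == key)
  			return it;
  	return NULL;
  }

  static struct item *get_or_add_item(int key)
  {
  	struct item *it, *new;
  	unsigned long flags;

  	raw_spin_lock_irqsave(&items_lock, flags);
  	it = find_item(key);
  	raw_spin_unlock_irqrestore(&items_lock, flags);
  	if (it)
  		return it;

  	new = kzalloc(sizeof(*new), GFP_KERNEL);	/* lock dropped: may sleep */
  	if (!new)
  		return NULL;
  	new->key = key;

  	raw_spin_lock_irqsave(&items_lock, flags);
  	it = find_item(key);		/* recheck: we may have raced */
  	if (!it) {
  		it = new;
  		new = NULL;
  		list_add(&it->node, &items);
  	}
  	raw_spin_unlock_irqrestore(&items_lock, flags);

  	kfree(new);			/* NULL unless we lost the race */
  	return it;
  }
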
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c
index 5e8f335..591b659 100644
--- a/drivers/dca/dca-sysfs.c
+++ b/drivers/dca/dca-sysfs.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/dca.h>
#include <linux/gfp.h>
+#include <linux/export.h>
static struct class *dca_class;
static struct idr dca_idr;
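
The new <linux/export.h> include anticipates the module.h header split: files that only use the EXPORT_SYMBOL* macros should include export.h directly rather than pulling in all of <linux/module.h>. A minimal sketch (the exported function is illustrative):

  #include <linux/export.h>

  int my_helper(void)
  {
  	return 0;
  }
  EXPORT_SYMBOL_GPL(my_helper);
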
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6a718b7..a842317 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on ARCH_MX3
+ depends on SOC_IMX31 || SOC_IMX35
select DMA_ENGINE
default y
help
@@ -193,7 +193,8 @@ config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
config PL330_DMA
tristate "DMA API Driver for PL330"
select DMA_ENGINE
- depends on PL330
+ depends on ARM_AMBA
+ select PL330
help
Select if your platform has one or more PL330 DMACs.
You need to provide platform specific settings via
@@ -214,7 +215,7 @@ config PCH_DMA
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+ depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
@@ -236,6 +237,13 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+config EP93XX_DMA
+ bool "Cirrus Logic EP93xx DMA support"
+ depends on ARCH_EP93XX
+ select DMA_ENGINE
+ help
+ Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
config DMA_ENGINE
bool
@@ -246,6 +254,7 @@ config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
+ depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095a..30cf3b1 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/TODO b/drivers/dma/TODO
index a4af858..734ed02 100644
--- a/drivers/dma/TODO
+++ b/drivers/dma/TODO
@@ -9,6 +9,5 @@ TODO for slave dma
- mxs-dma.c
- dw_dmac
- intel_mid_dma
- - ste_dma40
4. Check other subsystems for dma drivers and merge/move to dmaengine
5. Remove dma_slave_config's dma direction.
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e6d7228..b7cbd1a 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -66,31 +66,29 @@
* after the final transfer signalled by LBREQ or LSREQ. The DMAC
* will then move to the next LLI entry.
*
- * Only the former works sanely with scatter lists, so we only implement
- * the DMAC flow control method. However, peripherals which use the LBREQ
- * and LSREQ signals (eg, MMCI) are unable to use this mode, which through
- * these hardware restrictions prevents them from using scatter DMA.
- *
* Global TODO:
* - Break out common code from arch/arm/mach-s3c64xx and share
*/
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/dmapool.h>
-#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
-
+#include <linux/slab.h>
#include <asm/hardware/pl080.h>
#define DRIVER_NAME "pl08xdmac"
+static struct amba_driver pl08x_amba_driver;
+
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @channels: the number of channels available in this variant
@@ -125,7 +123,8 @@ struct pl08x_lli {
* @phy_chans: array of data for the physical channels
* @pool: a pool for the LLI descriptors
* @pool_ctr: counter of LLIs in the pool
- * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches
+ * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
+ * fetches
* @mem_buses: set to indicate memory transfers on AHB2.
* @lock: a spinlock for this struct
*/
@@ -148,22 +147,10 @@ struct pl08x_driver_data {
* PL08X specific defines
*/
-/*
- * Memory boundaries: the manual for PL08x says that the controller
- * cannot read past a 1KiB boundary, so these defines are used to
- * create transfer LLIs that do not cross such boundaries.
- */
-#define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */
-#define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT)
-
-/* Minimum period between work queue runs */
-#define PL08X_WQ_PERIODMIN 20
-
/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE 0x2000
/* Maximum times we call dma_pool_alloc on this pool without freeing */
-#define PL08X_MAX_ALLOCS 0x40
#define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN 8
@@ -275,7 +262,6 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
writel(val, ch->base + PL080_CH_CONFIG);
}
-
/*
* pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
* clears any pending interrupt status. This should not be used for
@@ -366,7 +352,9 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
if (!list_empty(&plchan->pend_list)) {
struct pl08x_txd *txdi;
list_for_each_entry(txdi, &plchan->pend_list, node) {
- bytes += txdi->len;
+ struct pl08x_sg *dsg;
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ bytes += dsg->len;
}
}
@@ -410,6 +398,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
return NULL;
}
+ pm_runtime_get_sync(&pl08x->adev->dev);
return ch;
}
@@ -423,6 +412,8 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
/* Stop the channel and clear its interrupts */
pl08x_terminate_phy_chan(pl08x, ch);
+ pm_runtime_put(&pl08x->adev->dev);
+
/* Mark it as free */
ch->serving = NULL;
spin_unlock_irqrestore(&ch->lock, flags);
@@ -495,43 +486,37 @@ static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
struct pl08x_lli_build_data {
struct pl08x_txd *txd;
- struct pl08x_driver_data *pl08x;
struct pl08x_bus_data srcbus;
struct pl08x_bus_data dstbus;
size_t remainder;
+ u32 lli_bus;
};
/*
- * Autoselect a master bus to use for the transfer this prefers the
- * destination bus if both available if fixed address on one bus the
- * other will be chosen
+ * Autoselect a master bus to use for the transfer. The slave bus is chosen as
+ * the victim when src & dst are not similarly aligned, i.e. if, after aligning
+ * the master's address to the transfer's width requirements (by sending a few
+ * bytes byte by byte), the slave is still not aligned, then its width is
+ * reduced to BYTE.
+ * - prefers the destination bus if both are available
+ * - prefers the bus with a fixed address (i.e. the peripheral)
*/
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
if (!(cctl & PL080_CONTROL_DST_INCR)) {
- *mbus = &bd->srcbus;
- *sbus = &bd->dstbus;
- } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
+ } else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
+ *mbus = &bd->srcbus;
+ *sbus = &bd->dstbus;
} else {
- if (bd->dstbus.buswidth == 4) {
- *mbus = &bd->dstbus;
- *sbus = &bd->srcbus;
- } else if (bd->srcbus.buswidth == 4) {
- *mbus = &bd->srcbus;
- *sbus = &bd->dstbus;
- } else if (bd->dstbus.buswidth == 2) {
+ if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
*mbus = &bd->dstbus;
*sbus = &bd->srcbus;
- } else if (bd->srcbus.buswidth == 2) {
+ } else {
*mbus = &bd->srcbus;
*sbus = &bd->dstbus;
- } else {
- /* bd->srcbus.buswidth == 1 */
- *mbus = &bd->dstbus;
- *sbus = &bd->srcbus;
}
}
}
@@ -550,9 +535,9 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
llis_va[num_llis].cctl = cctl;
llis_va[num_llis].src = bd->srcbus.addr;
llis_va[num_llis].dst = bd->dstbus.addr;
- llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli);
- if (bd->pl08x->lli_buses & PL08X_AHB2)
- llis_va[num_llis].lli |= PL080_LLI_LM_AHB2;
+ llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
+ sizeof(struct pl08x_lli);
+ llis_va[num_llis].lli |= bd->lli_bus;
if (cctl & PL080_CONTROL_SRC_INCR)
bd->srcbus.addr += len;
@@ -564,16 +549,12 @@ static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
bd->remainder -= len;
}
-/*
- * Return number of bytes to fill to boundary, or len.
- * This calculation works for any value of addr.
- */
-static inline size_t pl08x_pre_boundary(u32 addr, size_t len)
+static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
+ u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
- size_t boundary_len = PL08X_BOUNDARY_SIZE -
- (addr & (PL08X_BOUNDARY_SIZE - 1));
-
- return min(boundary_len, len);
+ *cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
+ pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
+ (*total_bytes) += len;
}
/*
@@ -587,13 +568,12 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
struct pl08x_bus_data *mbus, *sbus;
struct pl08x_lli_build_data bd;
int num_llis = 0;
- u32 cctl;
- size_t max_bytes_per_lli;
- size_t total_bytes = 0;
+ u32 cctl, early_bytes = 0;
+ size_t max_bytes_per_lli, total_bytes;
struct pl08x_lli *llis_va;
+ struct pl08x_sg *dsg;
- txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT,
- &txd->llis_bus);
+ txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
if (!txd->llis_va) {
dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
return 0;
@@ -601,13 +581,9 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x->pool_ctr++;
- /* Get the default CCTL */
- cctl = txd->cctl;
-
bd.txd = txd;
- bd.pl08x = pl08x;
- bd.srcbus.addr = txd->src_addr;
- bd.dstbus.addr = txd->dst_addr;
+ bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
+ cctl = txd->cctl;
/* Find maximum width of the source bus */
bd.srcbus.maxwidth =
@@ -619,215 +595,179 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
PL080_CONTROL_DWIDTH_SHIFT);
- /* Set up the bus widths to the maximum */
- bd.srcbus.buswidth = bd.srcbus.maxwidth;
- bd.dstbus.buswidth = bd.dstbus.maxwidth;
- dev_vdbg(&pl08x->adev->dev,
- "%s source bus is %d bytes wide, dest bus is %d bytes wide\n",
- __func__, bd.srcbus.buswidth, bd.dstbus.buswidth);
+ list_for_each_entry(dsg, &txd->dsg_list, node) {
+ total_bytes = 0;
+ cctl = txd->cctl;
+ bd.srcbus.addr = dsg->src_addr;
+ bd.dstbus.addr = dsg->dst_addr;
+ bd.remainder = dsg->len;
+ bd.srcbus.buswidth = bd.srcbus.maxwidth;
+ bd.dstbus.buswidth = bd.dstbus.maxwidth;
- /*
- * Bytes transferred == tsize * MIN(buswidths), not max(buswidths)
- */
- max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) *
- PL080_CONTROL_TRANSFER_SIZE_MASK;
- dev_vdbg(&pl08x->adev->dev,
- "%s max bytes per lli = %zu\n",
- __func__, max_bytes_per_lli);
+ pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
- /* We need to count this down to zero */
- bd.remainder = txd->len;
- dev_vdbg(&pl08x->adev->dev,
- "%s remainder = %zu\n",
- __func__, bd.remainder);
+ dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
+ bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
+ bd.srcbus.buswidth,
+ bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
+ bd.dstbus.buswidth,
+ bd.remainder);
+ dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
+ mbus == &bd.srcbus ? "src" : "dst",
+ sbus == &bd.srcbus ? "src" : "dst");
- /*
- * Choose bus to align to
- * - prefers destination bus if both available
- * - if fixed address on one bus chooses other
- */
- pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);
+ /*
+ * Zero length is only allowed if all these requirements are
+ * met:
+ * - flow controller is peripheral.
+ * - src.addr is aligned to src.width
+ * - dst.addr is aligned to dst.width
+ *
+		 * sg_len == 1 should be true, as there are two cases here:
+		 *
+		 * - Memory addresses are contiguous and not scattered. The
+		 *   client driver passes only one sg, with the memory address
+		 *   and zero length. We hand this to the controller; after the
+		 *   transfer it receives the last burst request from the
+		 *   peripheral and the transfer finishes.
+		 *
+		 * - Memory addresses are scattered and not contiguous. Since
+		 *   the DMA controller doesn't know when one LLI's transfer is
+		 *   over, it can't load the next LLI, so only a single LLI can
+		 *   be supported here. Thus, we can't have scattered addresses.
+ */
+ if (!bd.remainder) {
+ u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
+ PL080_CONFIG_FLOW_CONTROL_SHIFT;
+ if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
+ (fc <= PL080_FLOW_SRC2DST_SRC))) {
+ dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
+ __func__);
+ return 0;
+ }
- if (txd->len < mbus->buswidth) {
- /* Less than a bus width available - send as single bytes */
- while (bd.remainder) {
- dev_vdbg(&pl08x->adev->dev,
- "%s single byte LLIs for a transfer of "
- "less than a bus width (remain 0x%08x)\n",
- __func__, bd.remainder);
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
- total_bytes++;
- }
- } else {
- /* Make one byte LLIs until master bus is aligned */
- while ((mbus->addr) % (mbus->buswidth)) {
- dev_vdbg(&pl08x->adev->dev,
- "%s adjustment lli for less than bus width "
- "(remain 0x%08x)\n",
- __func__, bd.remainder);
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
- total_bytes++;
+ if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
+			    (bd.dstbus.addr % bd.dstbus.buswidth)) {
+ dev_err(&pl08x->adev->dev,
+ "%s src & dst address must be aligned to src"
+ " & dst width if peripheral is flow controller",
+ __func__);
+ return 0;
+ }
+
+ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+ bd.dstbus.buswidth, 0);
+ pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
+ break;
}
/*
- * Master now aligned
- * - if slave is not then we must set its width down
+		 * Send byte by byte in the following cases:
+		 * - less than a bus width is available
+		 * - until the master bus is aligned
*/
- if (sbus->addr % sbus->buswidth) {
- dev_dbg(&pl08x->adev->dev,
- "%s set down bus width to one byte\n",
- __func__);
+ if (bd.remainder < mbus->buswidth)
+ early_bytes = bd.remainder;
+ else if ((mbus->addr) % (mbus->buswidth)) {
+ early_bytes = mbus->buswidth - (mbus->addr) %
+ (mbus->buswidth);
+ if ((bd.remainder - early_bytes) < mbus->buswidth)
+ early_bytes = bd.remainder;
+ }
- sbus->buswidth = 1;
+ if (early_bytes) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s byte width LLIs (remain 0x%08x)\n",
+ __func__, bd.remainder);
+ prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
+ &total_bytes);
}
- /*
- * Make largest possible LLIs until less than one bus
- * width left
- */
- while (bd.remainder > (mbus->buswidth - 1)) {
- size_t lli_len, target_len, tsize, odd_bytes;
+ if (bd.remainder) {
+ /*
+ * Master now aligned
+ * - if slave is not then we must set its width down
+ */
+ if (sbus->addr % sbus->buswidth) {
+ dev_dbg(&pl08x->adev->dev,
+ "%s set down bus width to one byte\n",
+ __func__);
+
+ sbus->buswidth = 1;
+ }
/*
- * If enough left try to send max possible,
- * otherwise try to send the remainder
+ * Bytes transferred = tsize * src width, not
+ * MIN(buswidths)
*/
- target_len = min(bd.remainder, max_bytes_per_lli);
+ max_bytes_per_lli = bd.srcbus.buswidth *
+ PL080_CONTROL_TRANSFER_SIZE_MASK;
+ dev_vdbg(&pl08x->adev->dev,
+ "%s max bytes per lli = %zu\n",
+ __func__, max_bytes_per_lli);
/*
- * Set bus lengths for incrementing buses to the
- * number of bytes which fill to next memory boundary,
- * limiting on the target length calculated above.
+ * Make largest possible LLIs until less than one bus
+ * width left
*/
- if (cctl & PL080_CONTROL_SRC_INCR)
- bd.srcbus.fill_bytes =
- pl08x_pre_boundary(bd.srcbus.addr,
- target_len);
- else
- bd.srcbus.fill_bytes = target_len;
-
- if (cctl & PL080_CONTROL_DST_INCR)
- bd.dstbus.fill_bytes =
- pl08x_pre_boundary(bd.dstbus.addr,
- target_len);
- else
- bd.dstbus.fill_bytes = target_len;
-
- /* Find the nearest */
- lli_len = min(bd.srcbus.fill_bytes,
- bd.dstbus.fill_bytes);
-
- BUG_ON(lli_len > bd.remainder);
-
- if (lli_len <= 0) {
- dev_err(&pl08x->adev->dev,
- "%s lli_len is %zu, <= 0\n",
- __func__, lli_len);
- return 0;
- }
+ while (bd.remainder > (mbus->buswidth - 1)) {
+ size_t lli_len, tsize, width;
- if (lli_len == target_len) {
- /*
- * Can send what we wanted.
- * Maintain alignment
- */
- lli_len = (lli_len/mbus->buswidth) *
- mbus->buswidth;
- odd_bytes = 0;
- } else {
/*
- * So now we know how many bytes to transfer
- * to get to the nearest boundary. The next
- * LLI will past the boundary. However, we
- * may be working to a boundary on the slave
- * bus. We need to ensure the master stays
- * aligned, and that we are working in
- * multiples of the bus widths.
+ * If enough left try to send max possible,
+ * otherwise try to send the remainder
*/
- odd_bytes = lli_len % mbus->buswidth;
- lli_len -= odd_bytes;
+ lli_len = min(bd.remainder, max_bytes_per_lli);
- }
-
- if (lli_len) {
/*
- * Check against minimum bus alignment:
- * Calculate actual transfer size in relation
- * to bus width an get a maximum remainder of
- * the smallest bus width - 1
+ * Check against maximum bus alignment:
+ * Calculate actual transfer size in relation to
+				 * bus width and get a maximum remainder of the
+ * highest bus width - 1
*/
- /* FIXME: use round_down()? */
- tsize = lli_len / min(mbus->buswidth,
- sbus->buswidth);
- lli_len = tsize * min(mbus->buswidth,
- sbus->buswidth);
-
- if (target_len != lli_len) {
- dev_vdbg(&pl08x->adev->dev,
- "%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
- __func__, target_len, lli_len, txd->len);
- }
-
- cctl = pl08x_cctl_bits(cctl,
- bd.srcbus.buswidth,
- bd.dstbus.buswidth,
- tsize);
+ width = max(mbus->buswidth, sbus->buswidth);
+ lli_len = (lli_len / width) * width;
+ tsize = lli_len / bd.srcbus.buswidth;
dev_vdbg(&pl08x->adev->dev,
- "%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
+ "%s fill lli with single lli chunk of "
+ "size 0x%08zx (remainder 0x%08zx)\n",
__func__, lli_len, bd.remainder);
+
+ cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
+ bd.dstbus.buswidth, tsize);
pl08x_fill_lli_for_desc(&bd, num_llis++,
- lli_len, cctl);
+ lli_len, cctl);
total_bytes += lli_len;
}
-
- if (odd_bytes) {
- /*
- * Creep past the boundary, maintaining
- * master alignment
- */
- int j;
- for (j = 0; (j < mbus->buswidth)
- && (bd.remainder); j++) {
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- dev_vdbg(&pl08x->adev->dev,
- "%s align with boundary, single byte (remain 0x%08zx)\n",
- __func__, bd.remainder);
- pl08x_fill_lli_for_desc(&bd,
- num_llis++, 1, cctl);
- total_bytes++;
- }
+ /*
+ * Send any odd bytes
+ */
+ if (bd.remainder) {
+ dev_vdbg(&pl08x->adev->dev,
+ "%s align with boundary, send odd bytes (remain %zu)\n",
+ __func__, bd.remainder);
+ prep_byte_width_lli(&bd, &cctl, bd.remainder,
+ num_llis++, &total_bytes);
}
}
- /*
- * Send any odd bytes
- */
- while (bd.remainder) {
- cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
- dev_vdbg(&pl08x->adev->dev,
- "%s align with boundary, single odd byte (remain %zu)\n",
- __func__, bd.remainder);
- pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl);
- total_bytes++;
+ if (total_bytes != dsg->len) {
+ dev_err(&pl08x->adev->dev,
+				"%s size of encoded LLIs doesn't match total txd, transferred 0x%08zx from size 0x%08zx\n",
+ __func__, total_bytes, dsg->len);
+ return 0;
}
- }
- if (total_bytes != txd->len) {
- dev_err(&pl08x->adev->dev,
- "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
- __func__, total_bytes, txd->len);
- return 0;
- }
- if (num_llis >= MAX_NUM_TSFR_LLIS) {
- dev_err(&pl08x->adev->dev,
- "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
- __func__, (u32) MAX_NUM_TSFR_LLIS);
- return 0;
+ if (num_llis >= MAX_NUM_TSFR_LLIS) {
+ dev_err(&pl08x->adev->dev,
+ "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
+ __func__, (u32) MAX_NUM_TSFR_LLIS);
+ return 0;
+ }
}
llis_va = txd->llis_va;
@@ -840,15 +780,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
{
int i;
+ dev_vdbg(&pl08x->adev->dev,
+ "%-3s %-9s %-10s %-10s %-10s %s\n",
+ "lli", "", "csrc", "cdst", "clli", "cctl");
for (i = 0; i < num_llis; i++) {
dev_vdbg(&pl08x->adev->dev,
- "lli %d @%p: csrc=0x%08x, cdst=0x%08x, cctl=0x%08x, clli=0x%08x\n",
- i,
- &llis_va[i],
- llis_va[i].src,
- llis_va[i].dst,
- llis_va[i].cctl,
- llis_va[i].lli
+ "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, &llis_va[i], llis_va[i].src,
+ llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
);
}
}
@@ -861,11 +800,19 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
struct pl08x_txd *txd)
{
+ struct pl08x_sg *dsg, *_dsg;
+
/* Free the LLI */
- dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
+ if (txd->llis_va)
+ dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
pl08x->pool_ctr--;
+ list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
+ list_del(&dsg->node);
+ kfree(dsg);
+ }
+
kfree(txd);
}
@@ -922,9 +869,7 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
* need, but for slaves the physical signals may be muxed!
* Can the platform allow us to use this channel?
*/
- if (plchan->slave &&
- ch->signal < 0 &&
- pl08x->pd->get_signal) {
+ if (plchan->slave && pl08x->pd->get_signal) {
ret = pl08x->pd->get_signal(plchan);
if (ret < 0) {
dev_dbg(&pl08x->adev->dev,
@@ -1013,10 +958,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
* If slaves are relying on interrupts to signal completion this function
* must not be called with interrupts disabled.
*/
-static enum dma_status
-pl08x_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
dma_cookie_t last_used;
@@ -1054,64 +997,105 @@ pl08x_dma_tx_status(struct dma_chan *chan,
/* PrimeCell DMA extension */
struct burst_table {
- int burstwords;
+ u32 burstwords;
u32 reg;
};
static const struct burst_table burst_sizes[] = {
{
.burstwords = 256,
- .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_256,
},
{
.burstwords = 128,
- .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_128,
},
{
.burstwords = 64,
- .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_64,
},
{
.burstwords = 32,
- .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_32,
},
{
.burstwords = 16,
- .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_16,
},
{
.burstwords = 8,
- .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_8,
},
{
.burstwords = 4,
- .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .reg = PL080_BSIZE_4,
},
{
- .burstwords = 1,
- .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
+ .burstwords = 0,
+ .reg = PL080_BSIZE_1,
},
};
+/*
+ * Given the source and destination available bus masks, select which
+ * will be routed to each port. We try to have source and destination
+ * on separate ports, but always respect the allowable settings.
+ */
+static u32 pl08x_select_bus(u8 src, u8 dst)
+{
+ u32 cctl = 0;
+
+ if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
+ cctl |= PL080_CONTROL_DST_AHB2;
+ if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
+ cctl |= PL080_CONTROL_SRC_AHB2;
+
+ return cctl;
+}
+
+static u32 pl08x_cctl(u32 cctl)
+{
+ cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
+ PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
+ PL080_CONTROL_PROT_MASK);
+
+ /* Access the cell in privileged mode, non-bufferable, non-cacheable */
+ return cctl | PL080_CONTROL_PROT_SYS;
+}
+
+static u32 pl08x_width(enum dma_slave_buswidth width)
+{
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return PL080_WIDTH_8BIT;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return PL080_WIDTH_16BIT;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return PL080_WIDTH_32BIT;
+ default:
+ return ~0;
+ }
+}
+
+static u32 pl08x_burst(u32 maxburst)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
+ if (burst_sizes[i].burstwords <= maxburst)
+ break;
+
+ return burst_sizes[i].reg;
+}
+
static int dma_set_runtime_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
- struct pl08x_channel_data *cd = plchan->cd;
enum dma_slave_buswidth addr_width;
- dma_addr_t addr;
- u32 maxburst;
+ u32 width, burst, maxburst;
u32 cctl = 0;
- int i;
if (!plchan->slave)
return -EINVAL;
@@ -1119,11 +1103,9 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
if (config->direction == DMA_TO_DEVICE) {
- addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
} else if (config->direction == DMA_FROM_DEVICE) {
- addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1132,46 +1114,40 @@ static int dma_set_runtime_config(struct dma_chan *chan,
return -EINVAL;
}
- switch (addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
- (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
- break;
- default:
+ width = pl08x_width(addr_width);
+ if (width == ~0) {
dev_err(&pl08x->adev->dev,
"bad runtime_config: alien address width\n");
return -EINVAL;
}
+ cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
+ cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;
+
/*
- * Now decide on a maxburst:
* If this channel will only request single transfers, set this
* down to ONE element. Also select one element if no maxburst
* is specified.
*/
- if (plchan->cd->single || maxburst == 0) {
- cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
- (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
+ if (plchan->cd->single)
+ maxburst = 1;
+
+ burst = pl08x_burst(maxburst);
+ cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
+ cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
+
+ if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ plchan->src_addr = config->src_addr;
+ plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(plchan->cd->periph_buses,
+ pl08x->mem_buses);
} else {
- for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
- if (burst_sizes[i].burstwords <= maxburst)
- break;
- cctl |= burst_sizes[i].reg;
+ plchan->dst_addr = config->dst_addr;
+ plchan->dst_cctl = pl08x_cctl(cctl) | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(pl08x->mem_buses,
+ plchan->cd->periph_buses);
}
- plchan->runtime_addr = addr;
-
- /* Modify the default channel data to fit PrimeCell request */
- cd->cctl = cctl;
-
dev_dbg(&pl08x->adev->dev,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
@@ -1225,7 +1201,9 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
if (!num_llis) {
- kfree(txd);
+ spin_lock_irqsave(&plchan->lock, flags);
+ pl08x_free_txd(pl08x, txd);
+ spin_unlock_irqrestore(&plchan->lock, flags);
return -EINVAL;
}
@@ -1270,33 +1248,17 @@ static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
return 0;
}
-/*
- * Given the source and destination available bus masks, select which
- * will be routed to each port. We try to have source and destination
- * on separate ports, but always respect the allowable settings.
- */
-static u32 pl08x_select_bus(struct pl08x_driver_data *pl08x, u8 src, u8 dst)
-{
- u32 cctl = 0;
-
- if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
- cctl |= PL080_CONTROL_DST_AHB2;
- if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
- cctl |= PL080_CONTROL_SRC_AHB2;
-
- return cctl;
-}
-
static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
unsigned long flags)
{
- struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
+ struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
if (txd) {
dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
txd->tx.flags = flags;
txd->tx.tx_submit = pl08x_tx_submit;
INIT_LIST_HEAD(&txd->node);
+ INIT_LIST_HEAD(&txd->dsg_list);
/* Always enable error and terminal interrupts */
txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
@@ -1315,6 +1277,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
+ struct pl08x_sg *dsg;
int ret;
txd = pl08x_get_txd(plchan, flags);
@@ -1324,10 +1287,19 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
return NULL;
}
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
+ __func__);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
txd->direction = DMA_NONE;
- txd->src_addr = src;
- txd->dst_addr = dest;
- txd->len = len;
+ dsg->src_addr = src;
+ dsg->dst_addr = dest;
+ dsg->len = len;
/* Set platform data for m2m */
txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1338,8 +1310,8 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
if (pl08x->vd->dualmaster)
- txd->cctl |= pl08x_select_bus(pl08x,
- pl08x->mem_buses, pl08x->mem_buses);
+ txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
+ pl08x->mem_buses);
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
@@ -1356,20 +1328,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
struct pl08x_driver_data *pl08x = plchan->host;
struct pl08x_txd *txd;
- u8 src_buses, dst_buses;
- int ret;
-
- /*
- * Current implementation ASSUMES only one sg
- */
- if (sg_len != 1) {
- dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
- __func__);
- BUG();
- }
+ struct pl08x_sg *dsg;
+ struct scatterlist *sg;
+ dma_addr_t slave_addr;
+ int ret, tmp;
dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
- __func__, sgl->length, plchan->name);
+ __func__, sgl->length, plchan->name);
txd = pl08x_get_txd(plchan, flags);
if (!txd) {
@@ -1388,43 +1353,48 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
* channel target address dynamically at runtime.
*/
txd->direction = direction;
- txd->len = sgl->length;
-
- txd->cctl = plchan->cd->cctl &
- ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
- PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
- PL080_CONTROL_PROT_MASK);
-
- /* Access the cell in privileged mode, non-bufferable, non-cacheable */
- txd->cctl |= PL080_CONTROL_PROT_SYS;
if (direction == DMA_TO_DEVICE) {
- txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_SRC_INCR;
- txd->src_addr = sgl->dma_address;
- if (plchan->runtime_addr)
- txd->dst_addr = plchan->runtime_addr;
- else
- txd->dst_addr = plchan->cd->addr;
- src_buses = pl08x->mem_buses;
- dst_buses = plchan->cd->periph_buses;
+ txd->cctl = plchan->dst_cctl;
+ slave_addr = plchan->dst_addr;
} else if (direction == DMA_FROM_DEVICE) {
- txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
- txd->cctl |= PL080_CONTROL_DST_INCR;
- if (plchan->runtime_addr)
- txd->src_addr = plchan->runtime_addr;
- else
- txd->src_addr = plchan->cd->addr;
- txd->dst_addr = sgl->dma_address;
- src_buses = plchan->cd->periph_buses;
- dst_buses = pl08x->mem_buses;
+ txd->cctl = plchan->src_cctl;
+ slave_addr = plchan->src_addr;
} else {
+ pl08x_free_txd(pl08x, txd);
dev_err(&pl08x->adev->dev,
"%s direction unsupported\n", __func__);
return NULL;
}
- txd->cctl |= pl08x_select_bus(pl08x, src_buses, dst_buses);
+ if (plchan->cd->device_fc)
+ tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+ PL080_FLOW_PER2MEM_PER;
+ else
+ tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+ PL080_FLOW_PER2MEM;
+
+ txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
+
+ for_each_sg(sgl, sg, sg_len, tmp) {
+ dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
+ if (!dsg) {
+ pl08x_free_txd(pl08x, txd);
+ dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
+ __func__);
+ return NULL;
+ }
+ list_add_tail(&dsg->node, &txd->dsg_list);
+
+ dsg->len = sg_dma_len(sg);
+ if (direction == DMA_TO_DEVICE) {
+ dsg->src_addr = sg_phys(sg);
+ dsg->dst_addr = slave_addr;
+ } else {
+ dsg->src_addr = slave_addr;
+ dsg->dst_addr = sg_phys(sg);
+ }
+ }
ret = pl08x_prep_channel_resources(plchan, txd);
if (ret)
@@ -1499,9 +1469,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
- struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+ struct pl08x_dma_chan *plchan;
char *name = chan_id;
+ /* Reject channels for devices not bound to this driver */
+ if (chan->device->dev->driver != &pl08x_amba_driver.drv)
+ return false;
+
+ plchan = to_pl08x_chan(chan);
+
/* Check that the channel is not taken! */
if (!strcmp(plchan->name, name))
return true;
@@ -1517,34 +1493,34 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
*/
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
- u32 val;
-
- val = readl(pl08x->base + PL080_CONFIG);
- val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
- /* We implicitly clear bit 1 and that means little-endian mode */
- val |= PL080_CONFIG_ENABLE;
- writel(val, pl08x->base + PL080_CONFIG);
+ writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}
static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
struct device *dev = txd->tx.chan->device->dev;
+ struct pl08x_sg *dsg;
if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(dev, txd->src_addr, txd->len,
- DMA_TO_DEVICE);
- else
- dma_unmap_page(dev, txd->src_addr, txd->len,
- DMA_TO_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ else {
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->src_addr, dsg->len,
+ DMA_TO_DEVICE);
+ }
}
if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(dev, txd->dst_addr, txd->len,
- DMA_FROM_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
else
- dma_unmap_page(dev, txd->dst_addr, txd->len,
- DMA_FROM_DEVICE);
+ list_for_each_entry(dsg, &txd->dsg_list, node)
+ dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+ DMA_FROM_DEVICE);
}
}
@@ -1599,8 +1575,8 @@ static void pl08x_tasklet(unsigned long data)
*/
list_for_each_entry(waiting, &pl08x->memcpy.channels,
chan.device_node) {
- if (waiting->state == PL08X_CHAN_WAITING &&
- waiting->waiting != NULL) {
+ if (waiting->state == PL08X_CHAN_WAITING &&
+ waiting->waiting != NULL) {
int ret;
/* This should REALLY not fail now */
@@ -1640,50 +1616,64 @@ static void pl08x_tasklet(unsigned long data)
static irqreturn_t pl08x_irq(int irq, void *dev)
{
struct pl08x_driver_data *pl08x = dev;
- u32 mask = 0;
- u32 val;
- int i;
-
- val = readl(pl08x->base + PL080_ERR_STATUS);
- if (val) {
- /* An error interrupt (on one or more channels) */
- dev_err(&pl08x->adev->dev,
- "%s error interrupt, register value 0x%08x\n",
- __func__, val);
- /*
- * Simply clear ALL PL08X error interrupts,
- * regardless of channel and cause
- * FIXME: should be 0x00000003 on PL081 really.
- */
- writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
+ u32 mask = 0, err, tc, i;
+
+ /* check & clear - ERR & TC interrupts */
+ err = readl(pl08x->base + PL080_ERR_STATUS);
+ if (err) {
+ dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
+ __func__, err);
+ writel(err, pl08x->base + PL080_ERR_CLEAR);
}
- val = readl(pl08x->base + PL080_INT_STATUS);
+ tc = readl(pl08x->base + PL080_INT_STATUS);
+ if (tc)
+ writel(tc, pl08x->base + PL080_TC_CLEAR);
+
+ if (!err && !tc)
+ return IRQ_NONE;
+
for (i = 0; i < pl08x->vd->channels; i++) {
- if ((1 << i) & val) {
+ if (((1 << i) & err) || ((1 << i) & tc)) {
/* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving;
+ if (!plchan) {
+ dev_err(&pl08x->adev->dev,
+ "%s Error TC interrupt on unused channel: 0x%08x\n",
+ __func__, i);
+ continue;
+ }
+
/* Schedule tasklet on this channel */
tasklet_schedule(&plchan->tasklet);
-
mask |= (1 << i);
}
}
- /* Clear only the terminal interrupts on channels we processed */
- writel(mask, pl08x->base + PL080_TC_CLEAR);
return mask ? IRQ_HANDLED : IRQ_NONE;
}
+static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
+{
+ u32 cctl = pl08x_cctl(chan->cd->cctl);
+
+ chan->slave = true;
+ chan->name = chan->cd->bus_id;
+ chan->src_addr = chan->cd->addr;
+ chan->dst_addr = chan->cd->addr;
+ chan->src_cctl = cctl | PL080_CONTROL_DST_INCR |
+ pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses);
+ chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR |
+ pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses);
+}
+
/*
* Initialise the DMAC memcpy/slave channels.
* Make a local wrapper to hold required data
*/
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
- struct dma_device *dmadev,
- unsigned int channels,
- bool slave)
+ struct dma_device *dmadev, unsigned int channels, bool slave)
{
struct pl08x_dma_chan *chan;
int i;
@@ -1696,7 +1686,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
* to cope with that situation.
*/
for (i = 0; i < channels; i++) {
- chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL);
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!chan) {
dev_err(&pl08x->adev->dev,
"%s no memory for channel\n", __func__);
@@ -1707,9 +1697,8 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
chan->state = PL08X_CHAN_IDLE;
if (slave) {
- chan->slave = true;
- chan->name = pl08x->pd->slave_channels[i].bus_id;
chan->cd = &pl08x->pd->slave_channels[i];
+ pl08x_dma_slave_init(chan);
} else {
chan->cd = &pl08x->pd->memcpy_channel;
chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
@@ -1725,7 +1714,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
kfree(chan);
continue;
}
- dev_info(&pl08x->adev->dev,
+ dev_dbg(&pl08x->adev->dev,
"initialize virtual channel \"%s\"\n",
chan->name);
@@ -1834,9 +1823,9 @@ static const struct file_operations pl08x_debugfs_operations = {
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
/* Expose a simple debugfs interface to view all clocks */
- (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
- NULL, pl08x,
- &pl08x_debugfs_operations);
+ (void) debugfs_create_file(dev_name(&pl08x->adev->dev),
+ S_IFREG | S_IRUGO, NULL, pl08x,
+ &pl08x_debugfs_operations);
}
#else
@@ -1857,12 +1846,15 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
return ret;
/* Create the driver state holder */
- pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
+ pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
if (!pl08x) {
ret = -ENOMEM;
goto out_no_pl08x;
}
+ pm_runtime_set_active(&adev->dev);
+ pm_runtime_enable(&adev->dev);
+
/* Initialize memcpy engine */
dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
pl08x->memcpy.dev = &adev->dev;
@@ -1936,7 +1928,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
}
/* Initialize physical channels */
- pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)),
+ pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)),
GFP_KERNEL);
if (!pl08x->phy_chans) {
dev_err(&adev->dev, "%s failed to allocate "
@@ -1953,9 +1945,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&ch->lock);
ch->serving = NULL;
ch->signal = -1;
- dev_info(&adev->dev,
- "physical channel %d is %s\n", i,
- pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
+ dev_dbg(&adev->dev, "physical channel %d is %s\n",
+ i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
}
/* Register as many memcpy channels as there are physical channels */
@@ -1971,8 +1962,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
/* Register slave channels */
ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
- pl08x->pd->num_slave_channels,
- true);
+ pl08x->pd->num_slave_channels, true);
if (ret <= 0) {
dev_warn(&pl08x->adev->dev,
"%s failed to enumerate slave channels - %d\n",
@@ -2002,6 +1992,8 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
amba_part(adev), amba_rev(adev),
(unsigned long long)adev->res.start, adev->irq[0]);
+
+ pm_runtime_put(&adev->dev);
return 0;
out_no_slave_reg:
@@ -2020,6 +2012,9 @@ out_no_ioremap:
dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
+ pm_runtime_put(&adev->dev);
+ pm_runtime_disable(&adev->dev);
+
kfree(pl08x);
out_no_pl08x:
amba_release_regions(adev);
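
The core of the pl08x rework is replacing the single (src_addr, dst_addr, len) triple in pl08x_txd with a list of pl08x_sg entries, so prep_slave_sg can accept real scatterlists instead of BUG()ing on sg_len != 1. A distilled sketch of that list-building pattern, with the structures trimmed to the fields the loop touches (names are stand-ins, not the driver's):

  #include <linux/list.h>
  #include <linux/scatterlist.h>
  #include <linux/slab.h>

  struct xfer_sg {
  	dma_addr_t src_addr, dst_addr;
  	size_t len;
  	struct list_head node;
  };

  struct xfer_desc {
  	struct list_head dsg_list;
  };

  /* One list entry per scatterlist element (mem-to-device shown) */
  static int build_dsg_list(struct xfer_desc *txd, struct scatterlist *sgl,
  			  unsigned int sg_len, dma_addr_t slave_addr)
  {
  	struct scatterlist *sg;
  	struct xfer_sg *dsg;
  	int i;

  	for_each_sg(sgl, sg, sg_len, i) {
  		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
  		if (!dsg)
  			return -ENOMEM;	/* caller frees the partial list */
  		dsg->src_addr = sg_phys(sg);
  		dsg->dst_addr = slave_addr;
  		dsg->len = sg_dma_len(sg);
  		list_add_tail(&dsg->node, &txd->dsg_list);
  	}
  	return 0;
  }
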
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 1357c3b..c60d9c1 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -107,10 +107,11 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
struct at_desc *desc, *_desc;
struct at_desc *ret = NULL;
+ unsigned long flags;
unsigned int i = 0;
LIST_HEAD(tmp_list);
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
i++;
if (async_tx_test_ack(&desc->txd)) {
@@ -121,7 +122,7 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
dev_dbg(chan2dev(&atchan->chan_common),
"desc %p not ACKed\n", desc);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
dev_vdbg(chan2dev(&atchan->chan_common),
"scanned %u descriptors on freelist\n", i);
@@ -129,9 +130,9 @@ static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
if (!ret) {
ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
if (ret) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated++;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
dev_err(chan2dev(&atchan->chan_common),
"not enough descriptors available\n");
@@ -150,8 +151,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
if (desc) {
struct at_desc *child;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
list_for_each_entry(child, &desc->tx_list, desc_node)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving child desc %p to freelist\n",
@@ -160,7 +162,7 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
dev_vdbg(chan2dev(&atchan->chan_common),
"moving desc %p to freelist\n", desc);
list_add(&desc->desc_node, &atchan->free_list);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
}
@@ -295,7 +297,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
/* for cyclic transfers,
* no need to replay callback function while stopping */
- if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) {
+ if (!atc_chan_is_cyclic(atchan)) {
dma_async_tx_callback callback = txd->callback;
void *param = txd->callback_param;
@@ -467,16 +469,17 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
static void atc_tasklet(unsigned long data)
{
struct at_dma_chan *atchan = (struct at_dma_chan *)data;
+ unsigned long flags;
- spin_lock(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
atc_handle_error(atchan);
- else if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ else if (atc_chan_is_cyclic(atchan))
atc_handle_cyclic(atchan);
else
atc_advance_work(atchan);
- spin_unlock(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -535,8 +538,9 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
struct at_desc *desc = txd_to_at_desc(tx);
struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
dma_cookie_t cookie;
+ unsigned long flags;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
cookie = atc_assign_cookie(atchan, desc);
if (list_empty(&atchan->active_list)) {
@@ -550,7 +554,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
list_add_tail(&desc->desc_node, &atchan->queue);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return cookie;
}
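
Most of the at_hdmac churn is one mechanical conversion: spin_lock_bh only masks softirqs, so a lock that may also be taken with interrupts disabled, or from hard-IRQ context, must use the irqsave form. A minimal before/after sketch (lock and state are illustrative):

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(chan_lock);
  static int chan_state;

  static void update_state(int new_state)
  {
  	unsigned long flags;

  	/* was: spin_lock_bh(&chan_lock); -- unsafe against hard IRQs */
  	spin_lock_irqsave(&chan_lock, flags);
  	chan_state = new_state;
  	spin_unlock_irqrestore(&chan_lock, flags);
  }
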
@@ -934,28 +938,29 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma *atdma = to_at_dma(chan->device);
int chan_id = atchan->chan_common.chan_id;
+ unsigned long flags;
LIST_HEAD(list);
dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
if (cmd == DMA_PAUSE) {
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
set_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_RESUME) {
- if (!test_bit(ATC_IS_PAUSED, &atchan->status))
+ if (!atc_chan_is_paused(atchan))
return 0;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else if (cmd == DMA_TERMINATE_ALL) {
struct at_desc *desc, *_desc;
/*
@@ -964,7 +969,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
* channel. We still have to poll the channel enable bit due
* to AHB/HSB limitations.
*/
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
/* disabling channel: must also remove suspend state */
dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
@@ -985,7 +990,7 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
/* if channel dedicated to cyclic operations, free it */
clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
} else {
return -ENXIO;
}
@@ -1011,9 +1016,10 @@ atc_tx_status(struct dma_chan *chan,
struct at_dma_chan *atchan = to_at_dma_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
+ unsigned long flags;
enum dma_status ret;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
last_complete = atchan->completed_cookie;
last_used = chan->cookie;
@@ -1028,7 +1034,7 @@ atc_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
if (ret != DMA_SUCCESS)
dma_set_tx_state(txstate, last_complete, last_used,
@@ -1036,7 +1042,7 @@ atc_tx_status(struct dma_chan *chan,
else
dma_set_tx_state(txstate, last_complete, last_used, 0);
- if (test_bit(ATC_IS_PAUSED, &atchan->status))
+ if (atc_chan_is_paused(atchan))
ret = DMA_PAUSED;
dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
@@ -1053,18 +1059,19 @@ atc_tx_status(struct dma_chan *chan,
static void atc_issue_pending(struct dma_chan *chan)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ unsigned long flags;
dev_vdbg(chan2dev(chan), "issue_pending\n");
/* Not needed for cyclic transfers */
- if (test_bit(ATC_IS_CYCLIC, &atchan->status))
+ if (atc_chan_is_cyclic(atchan))
return;
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
if (!atc_chan_is_enabled(atchan)) {
atc_advance_work(atchan);
}
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
@@ -1080,6 +1087,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
struct at_dma *atdma = to_at_dma(chan->device);
struct at_desc *desc;
struct at_dma_slave *atslave;
+ unsigned long flags;
int i;
u32 cfg;
LIST_HEAD(tmp_list);
@@ -1123,11 +1131,11 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
list_add_tail(&desc->desc_node, &tmp_list);
}
- spin_lock_bh(&atchan->lock);
+ spin_lock_irqsave(&atchan->lock, flags);
atchan->descs_allocated = i;
list_splice(&tmp_list, &atchan->free_list);
atchan->completed_cookie = chan->cookie = 1;
- spin_unlock_bh(&atchan->lock);
+ spin_unlock_irqrestore(&atchan->lock, flags);
/* channel parameters */
channel_writel(atchan, CFG, cfg);
@@ -1223,7 +1231,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
atdma->dma_common.cap_mask = pdata->cap_mask;
atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
- size = io->end - io->start + 1;
+ size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
err = -EBUSY;
goto err_kfree;
@@ -1267,12 +1275,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
atchan->chan_common.cookie = atchan->completed_cookie = 1;
- atchan->chan_common.chan_id = i;
list_add_tail(&atchan->chan_common.device_node,
&atdma->dma_common.channels);
@@ -1300,22 +1307,20 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask))
+ if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-
- if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
+ /* controller can do slave DMA: can trigger cyclic transfers */
+ dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
-
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ||
- dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask))
atdma->dma_common.device_control = atc_control;
+ }
dma_writel(atdma, EN, AT_DMA_ENABLE);
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- atdma->dma_common.chancnt);
+ pdata->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1369,7 +1374,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
atdma->regs = NULL;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(io->start, io->end - io->start + 1);
+ release_mem_region(io->start, resource_size(io));
kfree(atdma);
@@ -1384,27 +1389,112 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk);
}
+static int at_dma_prepare(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ /* wait for transaction completion (except in cyclic case) */
+ if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static void atc_suspend_cyclic(struct at_dma_chan *atchan)
+{
+ struct dma_chan *chan = &atchan->chan_common;
+
+ /* The channel should be paused by the user;
+ * do it anyway even if that has not been done already */
+ if (!atc_chan_is_paused(atchan)) {
+ dev_warn(chan2dev(chan),
+ "cyclic channel not paused, should be done by channel user\n");
+ atc_control(chan, DMA_PAUSE, 0);
+ }
+
+ /* now preserve additional data for cyclic operations */
+ /* next descriptor address in the cyclic list */
+ atchan->save_dscr = channel_readl(atchan, DSCR);
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
- at_dma_off(platform_get_drvdata(pdev));
+ /* preserve data */
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ if (atc_chan_is_cyclic(atchan))
+ atc_suspend_cyclic(atchan);
+ atchan->save_cfg = channel_readl(atchan, CFG);
+ }
+ atdma->save_imr = dma_readl(atdma, EBCIMR);
+
+ /* disable DMA controller */
+ at_dma_off(atdma);
clk_disable(atdma->clk);
return 0;
}
+static void atc_resume_cyclic(struct at_dma_chan *atchan)
+{
+ struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+
+ /* restore channel status for cyclic descriptors list:
+ * next descriptor in the cyclic list at the time of suspend */
+ channel_writel(atchan, SADDR, 0);
+ channel_writel(atchan, DADDR, 0);
+ channel_writel(atchan, CTRLA, 0);
+ channel_writel(atchan, CTRLB, 0);
+ channel_writel(atchan, DSCR, atchan->save_dscr);
+ dma_writel(atdma, CHER, atchan->mask);
+
+ /* The pause status must be cleared by the channel user;
+ * we cannot take the initiative to do it here */
+
+ vdbg_dump_regs(atchan);
+}
+
static int at_dma_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
+ struct dma_chan *chan, *_chan;
+ /* bring back DMA controller */
clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
+
+ /* clear any pending interrupt */
+ while (dma_readl(atdma, EBCISR))
+ cpu_relax();
+
+ /* restore saved data */
+ dma_writel(atdma, EBCIER, atdma->save_imr);
+ list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
+ device_node) {
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+
+ channel_writel(atchan, CFG, atchan->save_cfg);
+ if (atc_chan_is_cyclic(atchan))
+ atc_resume_cyclic(atchan);
+ }
return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
+ .prepare = at_dma_prepare,
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
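The three callbacks wired up above split the PM work: .prepare may veto the system suspend with -EAGAIN while a non-cyclic transfer is still in flight, and the _noirq pair saves/restores per-channel state once device interrupts are off. A minimal sketch of the same pattern follows; the example_* names, register offsets and the busy test are assumptions, not this driver's code.

static int example_prepare(struct device *dev)
{
	struct example_dma *d = dev_get_drvdata(dev);

	/* veto system suspend while a one-shot transfer is running */
	return example_chan_busy(d) ? -EAGAIN : 0;	/* assumed helper */
}

static int example_suspend_noirq(struct device *dev)
{
	struct example_dma *d = dev_get_drvdata(dev);

	d->saved_imr = readl(d->regs + EXAMPLE_IMR);	/* save irq mask */
	clk_disable(d->clk);				/* gate the block */
	return 0;
}

static int example_resume_noirq(struct device *dev)
{
	struct example_dma *d = dev_get_drvdata(dev);

	clk_enable(d->clk);
	writel(d->saved_imr, d->regs + EXAMPLE_IER);	/* restore irq mask */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.prepare	= example_prepare,
	.suspend_noirq	= example_suspend_noirq,
	.resume_noirq	= example_resume_noirq,
};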
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 19ed470..5aa82b4 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -204,6 +204,9 @@ enum atc_status {
* @status: transmit status information from irq/prep* functions
* to tasklet (use atomic operations)
* @tasklet: bottom half to finish transaction work
+ * @save_cfg: configuration register that is saved on suspend/resume cycle
+ * @save_dscr: for cyclic operations, the next descriptor address in the
+ * cyclic list, saved across a suspend/resume cycle
* @lock: serializes enqueue/dequeue operations to descriptors lists
* @completed_cookie: identifier for the most recently completed operation
* @active_list: list of descriptors the dmaengine is currently running
@@ -218,6 +221,8 @@ struct at_dma_chan {
u8 mask;
unsigned long status;
struct tasklet_struct tasklet;
+ u32 save_cfg;
+ u32 save_dscr;
spinlock_t lock;
@@ -248,6 +253,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
* @chan_common: common dmaengine dma_device object members
* @ch_regs: memory mapped register base
* @clk: dma controller clock
+ * @save_imr: interrupt mask register that is saved on suspend/resume cycle
* @all_chan_mask: all channels available in a mask
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @chan: channels table to store at_dma_chan structures
@@ -256,6 +262,7 @@ struct at_dma {
struct dma_device dma_common;
void __iomem *regs;
struct clk *clk;
+ u32 save_imr;
u8 all_chan_mask;
@@ -354,6 +361,23 @@ static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
return !!(dma_readl(atdma, CHSR) & atchan->mask);
}
+/**
+ * atc_chan_is_paused - test channel pause/resume status
+ * @atchan: channel whose status we want to test
+ */
+static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
+{
+ return test_bit(ATC_IS_PAUSED, &atchan->status);
+}
+
+/**
+ * atc_chan_is_cyclic - test if given channel has cyclic property set
+ * @atchan: channel whose status we want to test
+ */
+static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
+{
+ return test_bit(ATC_IS_CYCLIC, &atchan->status);
+}
/**
* set_desc_eol - set end-of-link to descriptor so it will end transfer
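The two helpers above exist so callers stop open-coding test_bit() on atchan->status. Reduced to its essentials, the pattern looks like this (a sketch; the bit index is an assumption):

#include <linux/bitops.h>

#define EX_IS_PAUSED	1	/* assumed bit position within ->status */

static inline int ex_chan_is_paused(unsigned long *status)
{
	/* test_bit() is atomic, so irq, tasklet and process context agree */
	return test_bit(EX_IS_PAUSED, status);
}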
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index af8c0b5..4234f41 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
+#include <linux/scatterlist.h>
#include <linux/slab.h> /* kmalloc() */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
@@ -40,6 +41,8 @@ struct coh901318_desc {
struct coh901318_lli *lli;
enum dma_data_direction dir;
unsigned long flags;
+ u32 head_config;
+ u32 head_ctrl;
};
struct coh901318_base {
@@ -660,6 +663,9 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
coh901318_desc_submit(cohc, cohd);
+ /* Program the transaction head */
+ coh901318_set_conf(cohc, cohd->head_config);
+ coh901318_set_ctrl(cohc, cohd->head_ctrl);
coh901318_prep_linked_list(cohc, cohd->lli);
/* start dma job on this channel */
@@ -1090,8 +1096,6 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
} else
goto err_direction;
- coh901318_set_conf(cohc, config);
-
/* The dma only supports transmitting packets up to
* MAX_DMA_PACKET_SIZE. Calculate the total number of
* dma elements required to send the entire sg list
@@ -1128,16 +1132,18 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (ret)
goto err_lli_fill;
- /*
- * Set the default ctrl for the channel to the one from the lli,
- * things may have changed due to odd buffer alignment etc.
- */
- coh901318_set_ctrl(cohc, lli->control);
COH_DBG(coh901318_list_print(cohc, lli));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
+ cohd->head_config = config;
+ /*
+ * Set the default head ctrl for the channel to the one from the
+ * lli; things may have changed due to odd buffer alignment,
+ * etc.
+ */
+ cohd->head_ctrl = lli->control;
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
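The coh901318 change moves the CONF/CTRL writes out of prep_slave_sg() and into queue_start(): the values computed at prepare time now travel inside the descriptor (head_config/head_ctrl) and reach the hardware only when that descriptor is actually started, so preparing a second job can no longer clobber one that is still running. Schematically (a sketch with assumed ex_* helpers, not the driver's full code):

struct ex_desc {
	u32 head_config;	/* programmed at start, not at prep */
	u32 head_ctrl;
};

static struct ex_desc *ex_prep(struct ex_chan *c, u32 config, u32 ctrl)
{
	struct ex_desc *d = ex_desc_get(c);	/* assumed allocator */

	d->head_config = config;	/* stash only; no register writes */
	d->head_ctrl = ctrl;
	return d;
}

static void ex_queue_start(struct ex_chan *c, struct ex_desc *d)
{
	ex_set_conf(c, d->head_config);	/* hardware is touched only now */
	ex_set_ctrl(c, d->head_ctrl);
}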
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8bcb15f..5991114 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,7 @@
* See Documentation/dmaengine.txt for more details
*/
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
@@ -61,9 +62,9 @@
#include <linux/slab.h>
static DEFINE_MUTEX(dma_list_mutex);
+static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
-static struct idr dma_idr;
/* --- sysfs implementation --- */
@@ -509,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("dmaengine: failed to get %s: (%d)\n",
+ dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -563,8 +564,8 @@ void dmaengine_get(void)
list_del_rcu(&device->global_node);
break;
} else if (err)
- pr_err("dmaengine: failed to get %s: (%d)\n",
- dma_chan_name(chan), err);
+ pr_debug("%s: failed to get %s: (%d)\n",
+ __func__, dma_chan_name(chan), err);
}
}
@@ -1049,8 +1050,6 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
- idr_init(&dma_idr);
- mutex_init(&dma_list_mutex);
return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
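DEFINE_MUTEX() and DEFINE_IDR() yield objects that are fully initialized at compile time, which is why the explicit idr_init()/mutex_init() calls can simply be deleted from dma_bus_init(). The two forms are equivalent:

static DEFINE_MUTEX(example_lock);	/* ready before any initcall runs */
static DEFINE_IDR(example_idr);

static int __init example_init(void)
{
	/*
	 * the runtime equivalent that the patch removes:
	 *	mutex_init(&example_lock);
	 *	idr_init(&example_idr);
	 */
	return 0;
}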
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b4f5c32..eb1d864 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -8,7 +8,9 @@
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
+#include <linux/freezer.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
@@ -250,6 +252,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
+ set_freezable_with_signal();
ret = -ENOMEM;
@@ -304,7 +307,8 @@ static int dmatest_func(void *data)
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
struct completion cmp;
- unsigned long tmo = msecs_to_jiffies(timeout);
+ unsigned long start, tmo, end = 0 /* silence a bogus "uninitialized" warning */;
+ bool reload = true;
u8 align = 0;
total_tests++;
@@ -403,7 +407,17 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- tmo = wait_for_completion_timeout(&cmp, tmo);
+ do {
+ start = jiffies;
+ if (reload)
+ end = start + msecs_to_jiffies(timeout);
+ else if (end <= start)
+ end = start + 1;
+ tmo = wait_for_completion_interruptible_timeout(&cmp,
+ end - start);
+ reload = try_to_freeze();
+ } while (tmo == -ERESTARTSYS);
+
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (tmo == 0) {
@@ -476,6 +490,8 @@ err_srcs:
pr_notice("%s: terminating after %u tests, %u failures (status %d)\n",
thread_name, total_tests, failed_tests, ret);
+ /* terminate all transfers on specified channels */
+ chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
if (iterations > 0)
while (!kthread_should_stop()) {
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit);
@@ -498,6 +514,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc)
list_del(&thread->node);
kfree(thread);
}
+
+ /* terminate all transfers on specified channels */
+ dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0);
+
kfree(dtc);
}
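The reworked dmatest wait has to cope with the freezer: the interruptible wait returns -ERESTARTSYS when the thread is asked to freeze, so the wait is retried with whatever time remains, and the deadline is recomputed only after an actual freeze (the reload flag). The core of the idiom, sketched:

long tmo;
bool reload = true;
unsigned long start, end = 0;

do {
	start = jiffies;
	if (reload)			/* fresh deadline */
		end = start + msecs_to_jiffies(timeout);
	else if (end <= start)		/* deadline already passed */
		end = start + 1;
	tmo = wait_for_completion_interruptible_timeout(&cmp, end - start);
	reload = try_to_freeze();	/* true if we really froze */
} while (tmo == -ERESTARTSYS);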
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 4d180ca..9bfd6d3 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1407,12 +1407,11 @@ static int __init dw_probe(struct platform_device *pdev)
dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
INIT_LIST_HEAD(&dw->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
dwc->chan.device = &dw->dma;
dwc->chan.cookie = dwc->completed = 1;
- dwc->chan.chan_id = i;
if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
list_add_tail(&dwc->chan.device_node,
&dw->dma.channels);
@@ -1468,7 +1467,7 @@ static int __init dw_probe(struct platform_device *pdev)
dma_writel(dw, CFG, DW_CFG_DMA_EN);
printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
- dev_name(&pdev->dev), dw->dma.chancnt);
+ dev_name(&pdev->dev), pdata->nr_channels);
dma_async_device_register(&dw->dma);
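Dropping the manual chancnt increment and chan_id assignment is safe because dma_async_device_register() walks device->channels and fills both in itself; only the probe banner has to switch to pdata->nr_channels. The registration loop then reduces to:

INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < pdata->nr_channels; i++) {
	struct dw_dma_chan *dwc = &dw->chan[i];

	dwc->chan.device = &dw->dma;
	/* chan_id and dma.chancnt are filled in by the dmaengine core */
	list_add_tail(&dwc->chan.device_node, &dw->dma.channels);
}
dma_async_device_register(&dw->dma);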
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e18eaab..4be55f9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -14,6 +14,7 @@
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -23,6 +24,7 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
+#include <linux/module.h>
#include <asm/irq.h>
#include <mach/dma-v1.h>
@@ -135,7 +137,8 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (ret)
return ret;
- imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level);
+ imx_dma_config_burstlen(imxdmac->imxdma_channel,
+ imxdmac->watermark_level * imxdmac->word_size);
return 0;
default:
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b6d1455..f993955 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -18,6 +18,7 @@
*/
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
@@ -32,6 +33,9 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
#include <asm/irq.h>
#include <mach/sdma.h>
@@ -65,8 +69,8 @@
#define SDMA_ONCE_RTB 0x060
#define SDMA_XTRIG_CONF1 0x070
#define SDMA_XTRIG_CONF2 0x074
-#define SDMA_CHNENBL0_V2 0x200
-#define SDMA_CHNENBL0_V1 0x080
+#define SDMA_CHNENBL0_IMX35 0x200
+#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100
/*
@@ -299,21 +303,47 @@ struct sdma_firmware_header {
u32 ram_code_size;
};
+enum sdma_devtype {
+ IMX31_SDMA, /* runs on i.mx31 */
+ IMX35_SDMA, /* runs on i.mx35 and later */
+};
+
struct sdma_engine {
struct device *dev;
struct device_dma_parameters dma_parms;
struct sdma_channel channel[MAX_DMA_CHANNELS];
struct sdma_channel_control *channel_control;
void __iomem *regs;
- unsigned int version;
+ enum sdma_devtype devtype;
unsigned int num_events;
struct sdma_context_data *context;
dma_addr_t context_phys;
struct dma_device dma_device;
struct clk *clk;
+ struct mutex channel_0_lock;
struct sdma_script_start_addrs *script_addrs;
};
+static struct platform_device_id sdma_devtypes[] = {
+ {
+ .name = "imx31-sdma",
+ .driver_data = IMX31_SDMA,
+ }, {
+ .name = "imx35-sdma",
+ .driver_data = IMX35_SDMA,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, sdma_devtypes);
+
+static const struct of_device_id sdma_dt_ids[] = {
+ { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
+ { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sdma_dt_ids);
+
#define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */
@@ -321,8 +351,8 @@ struct sdma_engine {
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
- u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1);
-
+ u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
+ SDMA_CHNENBL0_IMX35);
return chnenbl0 + event * 4;
}
@@ -388,11 +418,15 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
dma_addr_t buf_phys;
int ret;
+ mutex_lock(&sdma->channel_0_lock);
+
buf_virt = dma_alloc_coherent(NULL,
size,
&buf_phys, GFP_KERNEL);
- if (!buf_virt)
- return -ENOMEM;
+ if (!buf_virt) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
bd0->mode.command = C0_SETPM;
bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
@@ -406,6 +440,9 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
dma_free_coherent(NULL, size, buf_virt, buf_phys);
+err_out:
+ mutex_unlock(&sdma->channel_0_lock);
+
return ret;
}
@@ -629,6 +666,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+ mutex_lock(&sdma->channel_0_lock);
+
memset(context, 0, sizeof(*context));
context->channel_state.pc = load_address;
@@ -649,6 +688,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
ret = sdma_run_channel(&sdma->channel[0]);
+ mutex_unlock(&sdma->channel_0_lock);
+
return ret;
}
@@ -1104,26 +1145,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
saddr_arr[i] = addr_arr[i];
}
-static int __init sdma_get_firmware(struct sdma_engine *sdma,
- const char *cpu_name, int to_version)
+static void sdma_load_firmware(const struct firmware *fw, void *context)
{
- const struct firmware *fw;
- char *fwname;
+ struct sdma_engine *sdma = context;
const struct sdma_firmware_header *header;
- int ret;
const struct sdma_script_start_addrs *addr;
unsigned short *ram_code;
- fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", cpu_name, to_version);
- if (!fwname)
- return -ENOMEM;
-
- ret = request_firmware(&fw, fwname, sdma->dev);
- if (ret) {
- kfree(fwname);
- return ret;
+ if (!fw) {
+ dev_err(sdma->dev, "firmware not found\n");
+ return;
}
- kfree(fwname);
if (fw->size < sizeof(*header))
goto err_firmware;
@@ -1153,6 +1185,16 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
err_firmware:
release_firmware(fw);
+}
+
+static int __init sdma_get_firmware(struct sdma_engine *sdma,
+ const char *fw_name)
+{
+ int ret;
+
+ ret = request_firmware_nowait(THIS_MODULE,
+ FW_ACTION_HOTPLUG, fw_name, sdma->dev,
+ GFP_KERNEL, sdma, sdma_load_firmware);
return ret;
}
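request_firmware_nowait() makes the load asynchronous: probe no longer blocks waiting for userspace, and the callback runs once the image turns up (or with fw == NULL if it never does). The calling convention, sketched with hypothetical example_* names:

static void example_fw_cb(const struct firmware *fw, void *context)
{
	struct example_dev *edev = context;

	if (!fw)
		return;				/* load failed or timed out */
	example_parse_fw(edev, fw->data, fw->size);	/* assumed parser */
	release_firmware(fw);			/* the callback owns the ref */
}

ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
			      "example-fw.bin", &pdev->dev, GFP_KERNEL,
			      edev, example_fw_cb);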
@@ -1162,15 +1204,16 @@ static int __init sdma_init(struct sdma_engine *sdma)
int i, ret;
dma_addr_t ccb_phys;
- switch (sdma->version) {
- case 1:
+ switch (sdma->devtype) {
+ case IMX31_SDMA:
sdma->num_events = 32;
break;
- case 2:
+ case IMX35_SDMA:
sdma->num_events = 48;
break;
default:
- dev_err(sdma->dev, "Unknown version %d. aborting\n", sdma->version);
+ dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
+ sdma->devtype);
return -ENODEV;
}
@@ -1239,22 +1282,29 @@ err_dma_alloc:
static int __init sdma_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(sdma_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ const char *fw_name;
int ret;
int irq;
struct resource *iores;
struct sdma_platform_data *pdata = pdev->dev.platform_data;
int i;
struct sdma_engine *sdma;
+ s32 *saddr_arr;
sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
if (!sdma)
return -ENOMEM;
+ mutex_init(&sdma->channel_0_lock);
+
sdma->dev = &pdev->dev;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!iores || irq < 0 || !pdata) {
+ if (!iores || irq < 0) {
ret = -EINVAL;
goto err_irq;
}
@@ -1281,10 +1331,19 @@ static int __init sdma_probe(struct platform_device *pdev)
goto err_request_irq;
sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
- if (!sdma->script_addrs)
+ if (!sdma->script_addrs) {
+ ret = -ENOMEM;
goto err_alloc;
+ }
+
+ /* initially no scripts available */
+ saddr_arr = (s32 *)sdma->script_addrs;
+ for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+ saddr_arr[i] = -EINVAL;
- sdma->version = pdata->sdma_version;
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ sdma->devtype = pdev->id_entry->driver_data;
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
@@ -1314,10 +1373,30 @@ static int __init sdma_probe(struct platform_device *pdev)
if (ret)
goto err_init;
- if (pdata->script_addrs)
+ if (pdata && pdata->script_addrs)
sdma_add_scripts(sdma, pdata->script_addrs);
- sdma_get_firmware(sdma, pdata->cpu_name, pdata->to_version);
+ if (pdata) {
+ sdma_get_firmware(sdma, pdata->fw_name);
+ } else {
+ /*
+ * Because the device tree does not encode the ROM script address,
+ * the RAM script in the firmware is mandatory for a device tree
+ * probe; otherwise probing fails.
+ */
+ ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
+ &fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get firmware name\n");
+ goto err_init;
+ }
+
+ ret = sdma_get_firmware(sdma, fw_name);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get firmware\n");
+ goto err_init;
+ }
+ }
sdma->dma_device.dev = &pdev->dev;
@@ -1365,7 +1444,9 @@ static int __exit sdma_remove(struct platform_device *pdev)
static struct platform_driver sdma_driver = {
.driver = {
.name = "imx-sdma",
+ .of_match_table = sdma_dt_ids,
},
+ .id_table = sdma_devtypes,
.remove = __exit_p(sdma_remove),
};
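With both an id_table and an of_match_table, the same driver binds whether the device comes from board code (matched by platform device name) or from the device tree; on a DT hit, probe points pdev->id_entry at the matching table entry so the rest of the code keeps reading driver_data unchanged. A condensed sketch of that probe path:

static int __init example_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(sdma_dt_ids, &pdev->dev);
	enum sdma_devtype devtype;

	if (of_id)			/* device-tree probe path */
		pdev->id_entry = of_id->data;
	/* platform-bus path: the core already set pdev->id_entry */
	devtype = pdev->id_entry->driver_data;
	/* from here on, only devtype is consulted */
	return 0;
}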
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index f653517..19a0c64 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/intel_mid_dma.h>
+#include <linux/module.h>
#define MAX_CHAN 4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h"
@@ -115,16 +116,15 @@ DMAC1 interrupt Functions*/
/**
* dmac1_mask_periphral_intr - mask the peripheral interrupt
- * @midc: dma channel for which masking is required
+ * @mid: dma device for which masking is required
*
* Masks the DMA peripheral interrupt.
* This is valid for DMAC1 family controllers only.
* The controller should have its peripheral mask registers already mapped.
*/
-static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc)
+static void dmac1_mask_periphral_intr(struct middma_device *mid)
{
u32 pimr;
- struct middma_device *mid = to_middma_device(midc->chan.device);
if (mid->pimr_mask) {
pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK);
@@ -184,7 +184,6 @@ static void enable_dma_interrupt(struct intel_mid_dma_chan *midc)
static void disable_dma_interrupt(struct intel_mid_dma_chan *midc)
{
/*Check LPE PISR, make sure fwd is disabled*/
- dmac1_mask_periphral_intr(midc);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR);
iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR);
@@ -1114,7 +1113,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
midch->chan.device = &dma->common;
midch->chan.cookie = 1;
- midch->chan.chan_id = i;
midch->ch_id = dma->chan_base + i;
pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
@@ -1150,7 +1148,6 @@ static int mid_setup_dma(struct pci_dev *pdev)
dma_cap_set(DMA_SLAVE, dma->common.cap_mask);
dma_cap_set(DMA_PRIVATE, dma->common.cap_mask);
dma->common.dev = &pdev->dev;
- dma->common.chancnt = dma->max_chan;
dma->common.device_alloc_chan_resources =
intel_mid_dma_alloc_chan_resources;
@@ -1350,8 +1347,8 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
if (device->ch[i].in_use)
return -EAGAIN;
}
+ dmac1_mask_periphral_intr(device);
device->state = SUSPENDED;
- pci_set_drvdata(pci, device);
pci_save_state(pci);
pci_disable_device(pci);
pci_set_power_state(pci, PCI_D3hot);
@@ -1380,7 +1377,6 @@ int dma_resume(struct pci_dev *pci)
}
device->state = RUNNING;
iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
- pci_set_drvdata(pci, device);
return 0;
}
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index a4d6cb0..3f89386 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -75,7 +75,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -91,7 +92,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioat_chan_common *chan = data;
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
return IRQ_HANDLED;
}
@@ -113,7 +115,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
- tasklet_disable(&chan->cleanup_task);
}
/**
@@ -356,13 +357,43 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- tasklet_enable(&chan->cleanup_task);
+ set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}
+void ioat_stop(struct ioat_chan_common *chan)
+{
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ int chan_id = chan_num(chan);
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &chan->state);
+
+ /* flush inflight interrupts */
+#ifdef CONFIG_PCI_MSI
+ if (pdev->msix_enabled) {
+ struct msix_entry *msix = &device->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ } else
+#endif
+ synchronize_irq(pdev->irq);
+
+ /* flush inflight timers */
+ del_timer_sync(&chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ device->cleanup_fn((unsigned long) &chan->common);
+}
+
/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
@@ -381,9 +412,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
if (ioat->desccount == 0)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- ioat1_cleanup(ioat);
+ ioat_stop(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
@@ -528,8 +557,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat1_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -548,9 +580,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
PCI_DMA_TODEVICE, flags, 0);
}
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 completion;
completion = *chan->completion;
@@ -571,7 +603,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
}
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- unsigned long *phys_complete)
+ dma_addr_t *phys_complete)
{
*phys_complete = ioat_get_current_completion(chan);
if (*phys_complete == chan->last_completion)
@@ -582,14 +614,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
return true;
}
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct list_head *_desc, *n;
struct dma_async_tx_descriptor *tx;
- dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
- __func__, phys_complete);
+ dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+ __func__, (unsigned long long) phys_complete);
list_for_each_safe(_desc, n, &ioat->used_desc) {
struct ioat_desc_sw *desc;
@@ -655,7 +687,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
prefetch(chan->completion);
@@ -701,7 +733,7 @@ static void ioat1_timer_event(unsigned long data)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&ioat->desc_lock);
} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&ioat->desc_lock);
/* if we haven't made progress and we have already
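ioat_stop() encodes a strict quiesce order: clear IOAT_RUN first so neither the interrupt handler nor the tasklet can re-arm anything, then flush each source of deferred work in turn; anything less leaves a window where a just-fired tasklet re-enables the channel behind free_chan_resources(). The ordering, distilled (irq is the channel's vector):

clear_bit(IOAT_RUN, &chan->state);	/* 1. no new tasklet scheduling */
synchronize_irq(irq);			/* 2. in-flight handler is done */
del_timer_sync(&chan->timer);		/* 3. no pending timer callback */
tasklet_kill(&chan->cleanup_task);	/* 4. tasklet finished for good */
device->cleanup_fn((unsigned long)&chan->common); /* 5. race-free cleanup */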
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 5216c8a..466e0f34c 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
struct ioat_chan_common {
struct dma_chan common;
void __iomem *reg_base;
- unsigned long last_completion;
+ dma_addr_t last_completion;
spinlock_t cleanup_lock;
dma_cookie_t completed_cookie;
unsigned long state;
@@ -333,7 +333,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
void __devexit ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -341,9 +341,10 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
- unsigned long *phys_complete);
+ dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d65f83..e60933e 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -126,7 +126,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&ioat->prep_lock);
}
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct dma_async_tx_descriptor *tx;
@@ -178,7 +178,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -189,8 +189,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -259,7 +262,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -274,7 +277,7 @@ void ioat2_timer_event(unsigned long data)
struct ioat_chan_common *chan = &ioat->base;
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
@@ -542,10 +545,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
+ set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
- tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
@@ -555,7 +558,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
- set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -798,11 +800,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
if (!ioat->ring)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- device->cleanup_fn((unsigned long) c);
+ ioat_stop(chan);
device->reset_hw(chan);
- clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
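The unsigned long -> dma_addr_t conversions in these files matter on 32-bit kernels with 64-bit DMA addressing: dma_addr_t can be wider than a CPU word, so truncating it corrupts completion addresses above 4 GiB. Printing changes with it, since there is no dedicated printk format for dma_addr_t here:

dma_addr_t phys_complete;

/* dma_addr_t may be u64 even when unsigned long is only 32 bits */
dev_dbg(dev, "phys_complete: %llx\n",
	(unsigned long long)phys_complete);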
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 6e33926..8680031 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -73,10 +73,10 @@
/* provide a lookup table for setting the source address in the base or
* extended descriptor of an xor or pq descriptor
*/
-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
+static const u8 xor_idx_to_desc = 0xe0;
+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
+static const u8 pq_idx_to_desc = 0xf8;
+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
@@ -256,7 +256,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
* The difference from the dma_v2.c __cleanup() is that this routine
* handles extended descriptors and dma-unmapping raid operations.
*/
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
struct ioat_chan_common *chan = &ioat->base;
struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
spin_lock_bh(&chan->cleanup_lock);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -325,15 +325,18 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +351,7 @@ static void ioat3_timer_event(unsigned long data)
struct ioat_chan_common *chan = &ioat->base;
if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
- unsigned long phys_complete;
+ dma_addr_t phys_complete;
u64 status;
status = ioat_chansts(chan);
diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c
index fab37d1..5e3a40f 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -72,6 +72,17 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB0) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB1) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB2) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB3) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB4) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB5) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB6) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) },
+
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ioat_pci_tbl);
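The Sandy Bridge enabling above is purely a device-table change: each PCI_VDEVICE() entry expands to a vendor/device pair with every other field wildcarded, and MODULE_DEVICE_TABLE() exports the list so udev can autoload the module on hotplug. The shape of such a table (0x1234 is a placeholder, not a real device ID):

static struct pci_device_id example_pci_tbl[] = {
	/* PCI_VDEVICE(INTEL, id): vendor + device, the rest PCI_ANY_ID */
	{ PCI_VDEVICE(INTEL, 0x1234) },	/* placeholder device ID */
	{ 0, }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);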
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index c1a125e..0e5ef33 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -9,6 +9,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>
@@ -20,6 +21,7 @@
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <mach/ipu.h>
@@ -1306,6 +1308,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) {
callback = descnew->txd.callback;
callback_param = descnew->txd.callback_param;
+ list_del_init(&descnew->list);
spin_unlock(&ichan->lock);
if (callback)
callback(callback_param);
@@ -1427,39 +1430,58 @@ static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac *idmac = to_idmac(chan->device);
+ struct ipu *ipu = to_ipu(idmac);
+ struct list_head *list, *tmp;
unsigned long flags;
int i;
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
+ switch (cmd) {
+ case DMA_PAUSE:
+ spin_lock_irqsave(&ipu->lock, flags);
+ ipu_ic_disable_task(ipu, chan->chan_id);
- ipu_disable_channel(idmac, ichan,
- ichan->status >= IPU_CHANNEL_ENABLED);
+ /* Return all descriptors to the "prepared" state */
+ list_for_each_safe(list, tmp, &ichan->queue)
+ list_del_init(list);
- tasklet_disable(&to_ipu(idmac)->tasklet);
+ ichan->sg[0] = NULL;
+ ichan->sg[1] = NULL;
- /* ichan->queue is modified in ISR, have to spinlock */
- spin_lock_irqsave(&ichan->lock, flags);
- list_splice_init(&ichan->queue, &ichan->free_list);
+ spin_unlock_irqrestore(&ipu->lock, flags);
- if (ichan->desc)
- for (i = 0; i < ichan->n_tx_desc; i++) {
- struct idmac_tx_desc *desc = ichan->desc + i;
- if (list_empty(&desc->list))
- /* Descriptor was prepared, but not submitted */
- list_add(&desc->list, &ichan->free_list);
+ ichan->status = IPU_CHANNEL_INITIALIZED;
+ break;
+ case DMA_TERMINATE_ALL:
+ ipu_disable_channel(idmac, ichan,
+ ichan->status >= IPU_CHANNEL_ENABLED);
- async_tx_clear_ack(&desc->txd);
- }
+ tasklet_disable(&ipu->tasklet);
- ichan->sg[0] = NULL;
- ichan->sg[1] = NULL;
- spin_unlock_irqrestore(&ichan->lock, flags);
+ /* ichan->queue is modified in ISR, have to spinlock */
+ spin_lock_irqsave(&ichan->lock, flags);
+ list_splice_init(&ichan->queue, &ichan->free_list);
- tasklet_enable(&to_ipu(idmac)->tasklet);
+ if (ichan->desc)
+ for (i = 0; i < ichan->n_tx_desc; i++) {
+ struct idmac_tx_desc *desc = ichan->desc + i;
+ if (list_empty(&desc->list))
+ /* Descriptor was prepared, but not submitted */
+ list_add(&desc->list, &ichan->free_list);
- ichan->status = IPU_CHANNEL_INITIALIZED;
+ async_tx_clear_ack(&desc->txd);
+ }
+
+ ichan->sg[0] = NULL;
+ ichan->sg[1] = NULL;
+ spin_unlock_irqrestore(&ichan->lock, flags);
+
+ tasklet_enable(&ipu->tasklet);
+
+ ichan->status = IPU_CHANNEL_INITIALIZED;
+ break;
+ default:
+ return -ENOSYS;
+ }
return 0;
}
@@ -1662,7 +1684,6 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
struct idmac_channel *ichan = ipu->channel + i;
idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0);
- idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
}
dma_async_device_unregister(&idmac->dma);
@@ -1705,16 +1726,14 @@ static int __init ipu_probe(struct platform_device *pdev)
ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
/* Remap IPU common registers */
- ipu_data.reg_ipu = ioremap(mem_ipu->start,
- mem_ipu->end - mem_ipu->start + 1);
+ ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
if (!ipu_data.reg_ipu) {
ret = -ENOMEM;
goto err_ioremap_ipu;
}
/* Remap Image Converter and Image DMA Controller registers */
- ipu_data.reg_ic = ioremap(mem_ic->start,
- mem_ic->end - mem_ic->start + 1);
+ ipu_data.reg_ic = ioremap(mem_ic->start, resource_size(mem_ic));
if (!ipu_data.reg_ic) {
ret = -ENOMEM;
goto err_ioremap_ic;
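The resource_size() conversions above replace the open-coded, off-by-one-prone "end - start + 1" arithmetic; the helper computes the same inclusive length from a struct resource:

struct resource *res;

/* resource_size(res) == res->end - res->start + 1 */
base = ioremap(res->start, resource_size(res));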
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index ab8a4ef..a71f55e 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -81,7 +81,7 @@ static struct ipu_irq_map irq_map[CONFIG_MX3_IPU_IRQS];
/* Protects allocations from the above array of maps */
static DEFINE_MUTEX(map_lock);
/* Protects register accesses and individual mappings */
-static DEFINE_SPINLOCK(bank_lock);
+static DEFINE_RAW_SPINLOCK(bank_lock);
static struct ipu_irq_map *src2map(unsigned int src)
{
@@ -101,11 +101,11 @@ static void ipu_irq_unmask(struct irq_data *d)
uint32_t reg;
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
if (!bank) {
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
return;
}
@@ -114,7 +114,7 @@ static void ipu_irq_unmask(struct irq_data *d)
reg |= (1UL << (map->source & 31));
ipu_write_reg(bank->ipu, reg, bank->control);
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
static void ipu_irq_mask(struct irq_data *d)
@@ -124,11 +124,11 @@ static void ipu_irq_mask(struct irq_data *d)
uint32_t reg;
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
if (!bank) {
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
return;
}
@@ -137,7 +137,7 @@ static void ipu_irq_mask(struct irq_data *d)
reg &= ~(1UL << (map->source & 31));
ipu_write_reg(bank->ipu, reg, bank->control);
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
static void ipu_irq_ack(struct irq_data *d)
@@ -146,17 +146,17 @@ static void ipu_irq_ack(struct irq_data *d)
struct ipu_irq_bank *bank;
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
if (!bank) {
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq);
return;
}
ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status);
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
}
/**
@@ -172,11 +172,11 @@ bool ipu_irq_status(unsigned int irq)
unsigned long lock_flags;
bool ret;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
bank = map->bank;
ret = bank && ipu_read_reg(bank->ipu, bank->status) &
(1UL << (map->source & 31));
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
return ret;
}
@@ -213,10 +213,10 @@ int ipu_irq_map(unsigned int source)
if (irq_map[i].source < 0) {
unsigned long lock_flags;
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
irq_map[i].source = source;
irq_map[i].bank = irq_bank + source / 32;
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
ret = irq_map[i].irq;
pr_debug("IPU: mapped source %u to IRQ %u\n",
@@ -252,10 +252,10 @@ int ipu_irq_unmap(unsigned int source)
pr_debug("IPU: unmapped source %u from IRQ %u\n",
source, irq_map[i].irq);
- spin_lock_irqsave(&bank_lock, lock_flags);
+ raw_spin_lock_irqsave(&bank_lock, lock_flags);
irq_map[i].source = -EINVAL;
irq_map[i].bank = NULL;
- spin_unlock_irqrestore(&bank_lock, lock_flags);
+ raw_spin_unlock_irqrestore(&bank_lock, lock_flags);
ret = 0;
break;
@@ -276,7 +276,7 @@ static void ipu_irq_err(unsigned int irq, struct irq_desc *desc)
for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) {
struct ipu_irq_bank *bank = irq_bank + i;
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
status = ipu_read_reg(ipu, bank->status);
/*
* Don't think we have to clear all interrupts here, they will
* be acked by ->handle_irq() (handle_level_irq). However, we
* might want to clear unhandled interrupts after the loop...
* might want to clear unhandled interrupts after the loop...
*/
status &= ipu_read_reg(ipu, bank->control);
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
line--;
status &= ~(1UL << line);
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
if (map)
irq = map->irq;
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
if (!map) {
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
@@ -317,22 +317,22 @@ static void ipu_irq_fn(unsigned int irq, struct irq_desc *desc)
for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) {
struct ipu_irq_bank *bank = irq_bank + i;
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
status = ipu_read_reg(ipu, bank->status);
/* Not clearing all interrupts, see above */
status &= ipu_read_reg(ipu, bank->control);
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
while ((line = ffs(status))) {
struct ipu_irq_map *map;
line--;
status &= ~(1UL << line);
- spin_lock(&bank_lock);
+ raw_spin_lock(&bank_lock);
map = src2map(32 * i + line);
if (map)
irq = map->irq;
- spin_unlock(&bank_lock);
+ raw_spin_unlock(&bank_lock);
if (!map) {
pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index b9bae94..8ba4edc 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -741,7 +741,6 @@ static int __devinit mpc_dma_probe(struct platform_device *op)
mchan = &mdma->channels[i];
mchan->chan.device = dma;
- mchan->chan.chan_id = i;
mchan->chan.cookie = 1;
mchan->completed_cookie = mchan->chan.cookie;
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 954e334..a258101 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -218,12 +218,10 @@ static void mv_set_mode(struct mv_xor_chan *chan,
static void mv_chan_activate(struct mv_xor_chan *chan)
{
- u32 activation;
-
dev_dbg(chan->device->common.dev, " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
- activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+
+ /* writel ensures all descriptors are flushed before activation */
+ writel(BIT(0), XOR_ACTIVATION(chan));
}
static char mv_chan_is_busy(struct mv_xor_chan *chan)
@@ -388,7 +386,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
dma_cookie_t cookie = 0;
int busy = mv_chan_is_busy(mv_chan);
u32 current_desc = mv_chan_get_current_desc(mv_chan);
- int seen_current = 0;
+ int current_cleaned = 0;
+ struct mv_xor_desc *hw_desc;
dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
@@ -400,38 +399,57 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
chain_node) {
- prefetch(_iter);
- prefetch(&_iter->async_tx);
- /* do not advance past the current descriptor loaded into the
- * hardware channel, subsequent descriptors are either in
- * process or have not been submitted
- */
- if (seen_current)
- break;
+ /* clean finished descriptors */
+ hw_desc = iter->hw_desc;
+ if (hw_desc->status & XOR_DESC_SUCCESS) {
+ cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
+ cookie);
- /* stop the search if we reach the current descriptor and the
- * channel is busy
- */
- if (iter->async_tx.phys == current_desc) {
- seen_current = 1;
- if (busy)
+ /* done processing desc, clean slot */
+ mv_xor_clean_slot(iter, mv_chan);
+
+ /* break if we cleaned the current descriptor */
+ if (iter->async_tx.phys == current_desc) {
+ current_cleaned = 1;
+ break;
+ }
+ } else {
+ if (iter->async_tx.phys == current_desc) {
+ current_cleaned = 0;
break;
+ }
}
-
- cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
-
- if (mv_xor_clean_slot(iter, mv_chan))
- break;
}
if ((busy == 0) && !list_empty(&mv_chan->chain)) {
- struct mv_xor_desc_slot *chain_head;
- chain_head = list_entry(mv_chan->chain.next,
- struct mv_xor_desc_slot,
- chain_node);
-
- mv_xor_start_new_chain(mv_chan, chain_head);
+ if (current_cleaned) {
+ /*
+ * current descriptor cleaned and removed, run
+ * from list head
+ */
+ iter = list_entry(mv_chan->chain.next,
+ struct mv_xor_desc_slot,
+ chain_node);
+ mv_xor_start_new_chain(mv_chan, iter);
+ } else {
+ if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
+ /*
+ * descriptors are still waiting after
+ * current, trigger them
+ */
+ iter = list_entry(iter->chain_node.next,
+ struct mv_xor_desc_slot,
+ chain_node);
+ mv_xor_start_new_chain(mv_chan, iter);
+ } else {
+ /*
+ * some descriptors are still waiting
+ * to be cleaned
+ */
+ tasklet_schedule(&mv_chan->irq_tasklet);
+ }
+ }
}
if (cookie > 0)
@@ -1305,7 +1323,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
return -ENODEV;
msp->xor_base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!msp->xor_base)
return -EBUSY;
@@ -1314,7 +1332,7 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
return -ENODEV;
msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
- res->end - res->start + 1);
+ resource_size(res));
if (!msp->xor_high_base)
return -EBUSY;
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 977b592..ae2cfba 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -30,6 +30,7 @@
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_OPERATION_MODE_MEMSET 4
+#define XOR_DESC_SUCCESS 0x40000000
#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 88aad4f..b4588bd 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -130,6 +130,23 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
+static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
+{
+ struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
+ int chan_id = mxs_chan->chan.chan_id;
+ int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
+
+ /* enable apbh channel clock */
+ if (dma_is_apbh()) {
+ if (apbh_is_old())
+ writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
+ mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+ else
+ writel(1 << chan_id,
+ mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
+ }
+}
+
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -148,38 +165,21 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
+ /* clkgate needs to be enabled before writing other registers */
+ mxs_dma_clkgate(mxs_chan, 1);
+
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
- /* enable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR);
- }
-
/* write 1 to SEMA to kick off the channel */
writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id));
}
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int chan_id = mxs_chan->chan.chan_id;
-
/* disable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR);
- }
+ mxs_dma_clkgate(mxs_chan, 0);
mxs_chan->status = DMA_SUCCESS;
}
@@ -327,16 +327,21 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
memset(mxs_chan->ccw, 0, PAGE_SIZE);
- ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
- 0, "mxs-dma", mxs_dma);
- if (ret)
- goto err_irq;
+ if (mxs_chan->chan_irq != NO_IRQ) {
+ ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
+ 0, "mxs-dma", mxs_dma);
+ if (ret)
+ goto err_irq;
+ }
ret = clk_enable(mxs_dma->clk);
if (ret)
goto err_clk;
+ /* clkgate needs to be enabled for reset to finish */
+ mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -535,6 +540,7 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
mxs_dma_disable_chan(mxs_chan);
+ mxs_dma_reset_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -707,6 +713,8 @@ static struct platform_device_id mxs_dma_type[] = {
}, {
.name = "mxs-dma-apbx",
.driver_data = MXS_DMA_APBX,
+ }, {
+ /* end of list */
}
};
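mxs_dma_clkgate() folds the duplicated APBH clock handling into one helper built on the MXS set/clear register convention: writing a mask to reg + MXS_SET_ADDR sets exactly those bits and reg + MXS_CLR_ADDR clears them, with no read-modify-write. The idiom in isolation (the 0x4/0x8 offsets are the usual MXS layout, assumed here):

#define MXS_SET_ADDR	0x4	/* write-1-to-set mirror of the register */
#define MXS_CLR_ADDR	0x8	/* write-1-to-clear mirror */

static void example_clkgate(void __iomem *base, int chan, bool enable)
{
	int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;

	/* CLKGATE is active-low: clearing the bit ungates the clock */
	writel(1 << chan, base + HW_APBHX_CTRL0 + set_clr);
}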
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index e6f128a..49138e7 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -872,8 +872,7 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
int i;
nr_channels = id->driver_data;
- pd = kzalloc(sizeof(struct pch_dma)+
- sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
@@ -926,7 +925,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
}
pd->dma.dev = &pdev->dev;
- pd->dma.chancnt = nr_channels;
INIT_LIST_HEAD(&pd->dma.channels);
@@ -935,7 +933,6 @@ static int __devinit pch_dma_probe(struct pci_dev *pdev,
pd_chan->chan.device = &pd->dma;
pd_chan->chan.cookie = 1;
- pd_chan->chan.chan_id = i;
pd_chan->membase = &regs->desc[i];
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4802aac..720cace 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -17,6 +17,8 @@
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
#define NR_DEFAULT_DESC 16
@@ -68,6 +70,14 @@ struct dma_pl330_chan {
* NULL if the channel is available to be acquired.
*/
void *pl330_chid;
+
+ /* For D-to-M and M-to-D channels */
+ int burst_sz; /* the peripheral fifo width */
+ int burst_len; /* the number of burst */
+ dma_addr_t fifo_addr;
+
+ /* for cyclic capability */
+ bool cyclic;
};
struct dma_pl330_dmac {
@@ -82,7 +92,9 @@ struct dma_pl330_dmac {
spinlock_t pool_lock;
/* Peripheral channels connected to this DMAC */
- struct dma_pl330_chan peripherals[0]; /* keep at end */
+ struct dma_pl330_chan *peripherals; /* keep at end */
+
+ struct clk *clk;
};
struct dma_pl330_desc {
@@ -152,6 +164,31 @@ static inline void free_desc_list(struct list_head *list)
spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
+static inline void handle_cyclic_desc_list(struct list_head *list)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch;
+ unsigned long flags;
+
+ if (list_empty(list))
+ return;
+
+ list_for_each_entry(desc, list, node) {
+ dma_async_tx_callback callback;
+
+ /* Change status to reload it */
+ desc->status = PREP;
+ pch = desc->pchan;
+ callback = desc->txd.callback;
+ if (callback)
+ callback(desc->txd.callback_param);
+ }
+
+ spin_lock_irqsave(&pch->lock, flags);
+ list_splice_tail_init(list, &pch->work_list);
+ spin_unlock_irqrestore(&pch->lock, flags);
+}
+
static inline void fill_queue(struct dma_pl330_chan *pch)
{
struct dma_pl330_desc *desc;
@@ -205,7 +242,10 @@ static void pl330_tasklet(unsigned long data)
spin_unlock_irqrestore(&pch->lock, flags);
- free_desc_list(&list);
+ if (pch->cyclic)
+ handle_cyclic_desc_list(&list);
+ else
+ free_desc_list(&list);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@ -236,6 +276,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&pch->lock, flags);
pch->completed = chan->cookie = 1;
+ pch->cyclic = false;
pch->pl330_chid = pl330_request_channel(&pdmac->pif);
if (!pch->pl330_chid) {
@@ -253,25 +294,52 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_desc *desc;
+ struct dma_pl330_desc *desc, *_dt;
unsigned long flags;
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ struct dma_slave_config *slave_config;
+ LIST_HEAD(list);
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
-
- spin_lock_irqsave(&pch->lock, flags);
-
- /* FLUSH the PL330 Channel thread */
- pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&pch->lock, flags);
- /* Mark all desc done */
- list_for_each_entry(desc, &pch->work_list, node)
- desc->status = DONE;
+ /* FLUSH the PL330 Channel thread */
+ pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
- spin_unlock_irqrestore(&pch->lock, flags);
+ /* Mark all desc done */
+ list_for_each_entry_safe(desc, _dt, &pch->work_list, node) {
+ desc->status = DONE;
+ pch->completed = desc->txd.cookie;
+ list_move_tail(&desc->node, &list);
+ }
- pl330_tasklet((unsigned long) pch);
+ list_splice_tail_init(&list, &pdmac->desc_pool);
+ spin_unlock_irqrestore(&pch->lock, flags);
+ break;
+ case DMA_SLAVE_CONFIG:
+ slave_config = (struct dma_slave_config *)arg;
+
+ if (slave_config->direction == DMA_TO_DEVICE) {
+ if (slave_config->dst_addr)
+ pch->fifo_addr = slave_config->dst_addr;
+ if (slave_config->dst_addr_width)
+ pch->burst_sz = __ffs(slave_config->dst_addr_width);
+ if (slave_config->dst_maxburst)
+ pch->burst_len = slave_config->dst_maxburst;
+ } else if (slave_config->direction == DMA_FROM_DEVICE) {
+ if (slave_config->src_addr)
+ pch->fifo_addr = slave_config->src_addr;
+ if (slave_config->src_addr_width)
+ pch->burst_sz = __ffs(slave_config->src_addr_width);
+ if (slave_config->src_maxburst)
+ pch->burst_len = slave_config->src_maxburst;
+ }
+ break;
+ default:
+ dev_err(pch->dmac->pif.dev, "Not supported command.\n");
+ return -ENXIO;
+ }
return 0;
}
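The DMA_SLAVE_CONFIG case stores the log2 of the configured bus width, since __ffs() of a power of two returns its bit index (__ffs(4) == 2). A hypothetical client configuration, using the standard dmaengine_slave_config() wrapper (the FIFO address is illustrative only):

	struct dma_slave_config cfg = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= 0x13400020,	/* hypothetical FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,
	};

	/* Lands in pl330_control(chan, DMA_SLAVE_CONFIG, ...), leaving
	 * pch->fifo_addr == 0x13400020, pch->burst_sz == __ffs(4) == 2,
	 * pch->burst_len == 8. */
	dmaengine_slave_config(chan, &cfg);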
@@ -288,6 +356,9 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
pl330_release_channel(pch->pl330_chid);
pch->pl330_chid = NULL;
+ if (pch->cyclic)
+ list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+
spin_unlock_irqrestore(&pch->lock, flags);
}
@@ -451,8 +522,13 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = peri->peri_id;
+ if (peri) {
+ desc->req.rqtype = peri->rqtype;
+ desc->req.peri = pch->chan.chan_id;
+ } else {
+ desc->req.rqtype = MEMTOMEM;
+ desc->req.peri = 0;
+ }
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -519,6 +595,51 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
return burst_len;
}
+static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
+ size_t period_len, enum dma_data_direction direction)
+{
+ struct dma_pl330_desc *desc;
+ struct dma_pl330_chan *pch = to_pchan(chan);
+ dma_addr_t dst;
+ dma_addr_t src;
+
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ src = dma_addr;
+ dst = pch->fifo_addr;
+ break;
+ case DMA_FROM_DEVICE:
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ src = pch->fifo_addr;
+ dst = dma_addr;
+ break;
+ default:
+ dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
+ __func__, __LINE__);
+ return NULL;
+ }
+
+ desc->rqcfg.brst_size = pch->burst_sz;
+ desc->rqcfg.brst_len = 1;
+
+ pch->cyclic = true;
+
+ fill_px(&desc->px, dst, src, period_len);
+
+ return &desc->txd;
+}
+
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
dma_addr_t src, size_t len, unsigned long flags)
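pl330_prep_dma_cyclic() above prepares a single-period descriptor and marks the channel cyclic, so pl330_tasklet() hands completed descriptors to handle_cyclic_desc_list(), which fires the callback and re-queues them instead of freeing them. A sketch of a client using the new capability (buffer, period length, and callback names are hypothetical):

	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_cyclic(chan, buf_dma, buf_len,
						    period_len, DMA_TO_DEVICE);
	if (desc) {
		desc->callback = period_done;	/* invoked once per period */
		desc->callback_param = ctx;
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}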
@@ -529,10 +650,10 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
struct pl330_info *pi;
int burst;
- if (unlikely(!pch || !len || !peri))
+ if (unlikely(!pch || !len))
return NULL;
- if (peri->rqtype != MEMTOMEM)
+ if (peri && peri->rqtype != MEMTOMEM)
return NULL;
pi = &pch->dmac->pif;
@@ -574,10 +695,10 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct dma_pl330_peri *peri = chan->private;
struct scatterlist *sg;
unsigned long flags;
- int i, burst_size;
+ int i;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len))
+ if (unlikely(!pch || !sgl || !sg_len || !peri))
return NULL;
/* Make sure the direction is consistent */
@@ -590,8 +711,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
- addr = peri->fifo_addr;
- burst_size = peri->burst_sz;
+ addr = pch->fifo_addr;
first = NULL;
@@ -639,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
- desc->rqcfg.brst_size = burst_size;
+ desc->rqcfg.brst_size = pch->burst_sz;
desc->rqcfg.brst_len = 1;
}
@@ -666,17 +786,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
struct dma_device *pd;
struct resource *res;
int i, ret, irq;
+ int num_chan;
pdat = adev->dev.platform_data;
- if (!pdat || !pdat->nr_valid_peri) {
- dev_err(&adev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
/* Allocate a new DMAC and its Channels */
- pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
- + sizeof(*pdmac), GFP_KERNEL);
+ pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
if (!pdmac) {
dev_err(&adev->dev, "unable to allocate mem\n");
return -ENOMEM;
@@ -685,7 +800,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi = &pdmac->pif;
pi->dev = &adev->dev;
pi->pl330_data = NULL;
- pi->mcbufsz = pdat->mcbuf_sz;
+ pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
res = &adev->res;
request_mem_region(res->start, resource_size(res), "dma-pl330");
@@ -696,6 +811,30 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
goto probe_err1;
}
+ pdmac->clk = clk_get(&adev->dev, "dma");
+ if (IS_ERR(pdmac->clk)) {
+ dev_err(&adev->dev, "Cannot get operation clock.\n");
+ ret = -EINVAL;
+ goto probe_err1;
+ }
+
+ amba_set_drvdata(adev, pdmac);
+
+#ifdef CONFIG_PM_RUNTIME
+ /* to use the runtime PM helper functions */
+ pm_runtime_enable(&adev->dev);
+
+ /* enable the power domain */
+ if (pm_runtime_get_sync(&adev->dev)) {
+ dev_err(&adev->dev, "failed to get runtime pm\n");
+ ret = -ENODEV;
+ goto probe_err1;
+ }
+#else
+ /* enable dma clk */
+ clk_enable(pdmac->clk);
+#endif
+
irq = adev->irq[0];
ret = request_irq(irq, pl330_irq_handler, 0,
dev_name(&adev->dev), pi);
@@ -717,33 +856,45 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- for (i = 0; i < pdat->nr_valid_peri; i++) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
- pch = &pdmac->peripherals[i];
+ num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
+ if (!pdmac->peripherals) {
+ ret = -ENOMEM;
+ dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
+ goto probe_err4;
+ }
- switch (peri->rqtype) {
- case MEMTOMEM:
+ for (i = 0; i < num_chan; i++) {
+ pch = &pdmac->peripherals[i];
+ if (pdat) {
+ struct dma_pl330_peri *peri = &pdat->peri[i];
+
+ switch (peri->rqtype) {
+ case MEMTOMEM:
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ break;
+ case MEMTODEV:
+ case DEVTOMEM:
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ break;
+ default:
+ dev_err(&adev->dev, "DEVTODEV Not Supported\n");
+ continue;
+ }
+ pch->chan.private = peri;
+ } else {
dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
+ pch->chan.private = NULL;
}
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
pch->pl330_chid = NULL;
- pch->chan.private = peri;
pch->chan.device = pd;
- pch->chan.chan_id = i;
pch->dmac = pdmac;
/* Add the channel to the DMAC list */
- pd->chancnt++;
list_add_tail(&pch->chan.device_node, &pd->channels);
}
@@ -752,6 +903,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
pd->device_free_chan_resources = pl330_free_chan_resources;
pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
+ pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
pd->device_tx_status = pl330_tx_status;
pd->device_prep_slave_sg = pl330_prep_slave_sg;
pd->device_control = pl330_control;
@@ -763,8 +915,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
goto probe_err4;
}
- amba_set_drvdata(adev, pdmac);
-
dev_info(&adev->dev,
"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
dev_info(&adev->dev,
@@ -825,6 +975,13 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
+#ifdef CONFIG_PM_RUNTIME
+ pm_runtime_put(&adev->dev);
+ pm_runtime_disable(&adev->dev);
+#else
+ clk_disable(pdmac->clk);
+#endif
+
kfree(pdmac);
return 0;
@@ -838,10 +995,49 @@ static struct amba_id pl330_ids[] = {
{ 0, 0 },
};
+#ifdef CONFIG_PM_RUNTIME
+static int pl330_runtime_suspend(struct device *dev)
+{
+ struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+ if (!pdmac) {
+ dev_err(dev, "failed to get dmac\n");
+ return -ENODEV;
+ }
+
+ clk_disable(pdmac->clk);
+
+ return 0;
+}
+
+static int pl330_runtime_resume(struct device *dev)
+{
+ struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);
+
+ if (!pdmac) {
+ dev_err(dev, "failed to get dmac\n");
+ return -ENODEV;
+ }
+
+ clk_enable(pdmac->clk);
+
+ return 0;
+}
+#else
+#define pl330_runtime_suspend NULL
+#define pl330_runtime_resume NULL
+#endif /* CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops pl330_pm_ops = {
+ .runtime_suspend = pl330_runtime_suspend,
+ .runtime_resume = pl330_runtime_resume,
+};
+
static struct amba_driver pl330_driver = {
.drv = {
.owner = THIS_MODULE,
.name = "dma-pl330",
+ .pm = &pl330_pm_ops,
},
.id_table = pl330_ids,
.probe = pl330_probe,
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 0283300..81809c2 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -70,12 +70,36 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
static u16 dmaor_read(struct sh_dmae_device *shdev)
{
- return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
}
static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
- __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}
/*
@@ -120,7 +144,7 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ u32 chcr = chcr_read(sh_chan);
if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
return true; /* working */
@@ -130,8 +154,7 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
@@ -144,8 +167,7 @@ static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -169,18 +191,23 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
- chcr |= CHCR_DE | CHCR_IE;
- sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR);
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
}
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
- u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
- chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
}
static void dmae_init(struct sh_dmae_chan *sh_chan)
@@ -192,7 +219,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
LOG2_DEFAULT_XFER_SIZE);
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
- sh_dmae_writel(sh_chan, chcr, CHCR);
+ chcr_write(sh_chan, chcr);
}
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
@@ -202,23 +229,25 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
return -EBUSY;
sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
- sh_dmae_writel(sh_chan, val, CHCR);
+ chcr_write(sh_chan, val);
return 0;
}
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
- struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
u16 __iomem *addr = shdev->dmars;
- int shift = chan_pdata->dmars_bit;
+ unsigned int shift = chan_pdata->dmars_bit;
if (dmae_is_busy(sh_chan))
return -EBUSY;
+ if (pdata->no_dmars)
+ return 0;
+
/* in the case of a missing DMARS resource use first memory window */
if (!addr)
addr = (u16 __iomem *)shdev->chan_reg;
@@ -230,14 +259,23 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
+ struct sh_dmae_slave *param = tx->chan->private;
dma_async_tx_callback callback = tx->callback;
dma_cookie_t cookie;
+ bool power_up;
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irq(&sh_chan->desc_lock);
+
+ if (list_empty(&sh_chan->ld_queue))
+ power_up = true;
+ else
+ power_up = false;
cookie = sh_chan->common.cookie;
cookie++;
@@ -273,7 +311,38 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
tx->cookie, &last->async_tx, sh_chan->id,
desc->hw.sar, desc->hw.tcr, desc->hw.dar);
- spin_unlock_bh(&sh_chan->desc_lock);
+ if (power_up) {
+ sh_chan->pm_state = DMAE_PM_BUSY;
+
+ pm_runtime_get(sh_chan->dev);
+
+ spin_unlock_irq(&sh_chan->desc_lock);
+
+ pm_runtime_barrier(sh_chan->dev);
+
+ spin_lock_irq(&sh_chan->desc_lock);
+
+ /* Have we been reset, while waiting? */
+ if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
+ dev_dbg(sh_chan->dev, "Bring up channel %d\n",
+ sh_chan->id);
+ if (param) {
+ const struct sh_dmae_slave_config *cfg =
+ param->config;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+
+ if (sh_chan->pm_state == DMAE_PM_PENDING)
+ sh_chan_xfer_ld_queue(sh_chan);
+ sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+ }
+ }
+
+ spin_unlock_irq(&sh_chan->desc_lock);
return cookie;
}
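The submit path above takes the runtime-PM reference asynchronously under the descriptor lock, drops the lock before pm_runtime_barrier() so the resume callback can run, and re-checks the channel state afterwards. A minimal standalone sketch of that ordering, assuming the same locking rules:

	spin_lock_irq(&sh_chan->desc_lock);
	pm_runtime_get(sh_chan->dev);		/* async: must not sleep here */
	spin_unlock_irq(&sh_chan->desc_lock);

	pm_runtime_barrier(sh_chan->dev);	/* runtime_resume() has now run */

	spin_lock_irq(&sh_chan->desc_lock);
	/* the channel may have been reset while the lock was dropped, so
	 * reprogram DMARS/CHCR (or call dmae_init()) before starting */
	spin_unlock_irq(&sh_chan->desc_lock);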
@@ -296,9 +365,7 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
static const struct sh_dmae_slave_config *sh_dmae_find_slave(
struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
- struct dma_device *dma_dev = sh_chan->common.device;
- struct sh_dmae_device *shdev = container_of(dma_dev,
- struct sh_dmae_device, common);
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
struct sh_dmae_pdata *pdata = shdev->pdata;
int i;
@@ -319,8 +386,6 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
struct sh_dmae_slave *param = chan->private;
int ret;
- pm_runtime_get_sync(sh_chan->dev);
-
/*
* This relies on the guarantee from dmaengine that alloc_chan_resources
* never runs concurrently with itself or free_chan_resources.
@@ -340,31 +405,20 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
}
param->config = cfg;
-
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
- } else {
- dmae_init(sh_chan);
}
- spin_lock_bh(&sh_chan->desc_lock);
while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
- spin_unlock_bh(&sh_chan->desc_lock);
desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
- if (!desc) {
- spin_lock_bh(&sh_chan->desc_lock);
+ if (!desc)
break;
- }
dma_async_tx_descriptor_init(&desc->async_tx,
&sh_chan->common);
desc->async_tx.tx_submit = sh_dmae_tx_submit;
desc->mark = DESC_IDLE;
- spin_lock_bh(&sh_chan->desc_lock);
list_add(&desc->node, &sh_chan->ld_free);
sh_chan->descs_allocated++;
}
- spin_unlock_bh(&sh_chan->desc_lock);
if (!sh_chan->descs_allocated) {
ret = -ENOMEM;
@@ -378,7 +432,7 @@ edescalloc:
clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
- pm_runtime_put(sh_chan->dev);
+ chan->private = NULL;
return ret;
}
@@ -390,7 +444,6 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
struct sh_desc *desc, *_desc;
LIST_HEAD(list);
- int descs = sh_chan->descs_allocated;
/* Protect against ISR */
spin_lock_irq(&sh_chan->desc_lock);
@@ -410,15 +463,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
chan->private = NULL;
}
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irq(&sh_chan->desc_lock);
list_splice_init(&sh_chan->ld_free, &list);
sh_chan->descs_allocated = 0;
- spin_unlock_bh(&sh_chan->desc_lock);
-
- if (descs > 0)
- pm_runtime_put(sh_chan->dev);
+ spin_unlock_irq(&sh_chan->desc_lock);
list_for_each_entry_safe(desc, _desc, &list, node)
kfree(desc);
@@ -507,6 +557,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
LIST_HEAD(tx_list);
int chunks = 0;
+ unsigned long irq_flags;
int i;
if (!sg_len)
@@ -517,7 +568,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
(SH_DMA_TCR_MAX + 1);
/* Have to lock the whole loop to protect against concurrent release */
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
/*
* Chaining:
@@ -563,7 +614,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
/* Put them back on the free list, so, they don't get lost */
list_splice_tail(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
return &first->async_tx;
@@ -572,7 +623,7 @@ err_get_desc:
new->mark = DESC_IDLE;
list_splice(&tx_list, &sh_chan->ld_free);
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
return NULL;
}
@@ -634,6 +685,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
unsigned long arg)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+ unsigned long flags;
/* Only supports DMA_TERMINATE_ALL */
if (cmd != DMA_TERMINATE_ALL)
@@ -642,7 +694,7 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
if (!chan)
return -EINVAL;
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
dmae_halt(sh_chan);
if (!list_empty(&sh_chan->ld_queue)) {
@@ -651,9 +703,8 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
struct sh_desc, node);
desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
sh_chan->xmit_shift;
-
}
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
sh_dmae_chan_ld_cleanup(sh_chan, true);
@@ -668,8 +719,9 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
dma_cookie_t cookie = 0;
dma_async_tx_callback callback = NULL;
void *param = NULL;
+ unsigned long flags;
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
struct dma_async_tx_descriptor *tx = &desc->async_tx;
@@ -735,7 +787,13 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
async_tx_test_ack(&desc->async_tx)) || all) {
/* Remove from ld_queue list */
desc->mark = DESC_IDLE;
+
list_move(&desc->node, &sh_chan->ld_free);
+
+ if (list_empty(&sh_chan->ld_queue)) {
+ dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+ pm_runtime_put(sh_chan->dev);
+ }
}
}
@@ -746,7 +804,7 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
*/
sh_chan->completed_cookie = sh_chan->common.cookie;
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
if (callback)
callback(param);
@@ -765,16 +823,14 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
;
}
+/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
struct sh_desc *desc;
- spin_lock_bh(&sh_chan->desc_lock);
/* DMA work check */
- if (dmae_is_busy(sh_chan)) {
- spin_unlock_bh(&sh_chan->desc_lock);
+ if (dmae_is_busy(sh_chan))
return;
- }
/* Find the first not transferred descriptor */
list_for_each_entry(desc, &sh_chan->ld_queue, node)
@@ -787,14 +843,18 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
dmae_start(sh_chan);
break;
}
-
- spin_unlock_bh(&sh_chan->desc_lock);
}
static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- sh_chan_xfer_ld_queue(sh_chan);
+
+ spin_lock_irq(&sh_chan->desc_lock);
+ if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
+ sh_chan_xfer_ld_queue(sh_chan);
+ else
+ sh_chan->pm_state = DMAE_PM_PENDING;
+ spin_unlock_irq(&sh_chan->desc_lock);
}
static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
@@ -805,6 +865,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
dma_cookie_t last_used;
dma_cookie_t last_complete;
enum dma_status status;
+ unsigned long flags;
sh_dmae_chan_ld_cleanup(sh_chan, false);
@@ -815,7 +876,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
BUG_ON(last_complete < 0);
dma_set_tx_state(txstate, last_complete, last_used, 0);
- spin_lock_bh(&sh_chan->desc_lock);
+ spin_lock_irqsave(&sh_chan->desc_lock, flags);
status = dma_async_is_complete(cookie, last_complete, last_used);
@@ -833,7 +894,7 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
}
}
- spin_unlock_bh(&sh_chan->desc_lock);
+ spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
return status;
}
@@ -846,7 +907,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
spin_lock(&sh_chan->desc_lock);
- chcr = sh_dmae_readl(sh_chan, CHCR);
+ chcr = chcr_read(sh_chan);
if (chcr & CHCR_TE) {
/* DMA stop */
@@ -886,6 +947,12 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
list_splice_init(&sh_chan->ld_queue, &dl);
+ if (!list_empty(&dl)) {
+ dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
+ pm_runtime_put(sh_chan->dev);
+ }
+ sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
spin_unlock(&sh_chan->desc_lock);
/* Complete all */
@@ -926,7 +993,7 @@ static void dmae_do_tasklet(unsigned long data)
u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
- spin_lock(&sh_chan->desc_lock);
+ spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
((desc->direction == DMA_FROM_DEVICE &&
@@ -939,10 +1006,10 @@ static void dmae_do_tasklet(unsigned long data)
break;
}
}
- spin_unlock(&sh_chan->desc_lock);
-
/* Next desc */
sh_chan_xfer_ld_queue(sh_chan);
+ spin_unlock_irq(&sh_chan->desc_lock);
+
sh_dmae_chan_ld_cleanup(sh_chan, false);
}
@@ -1010,7 +1077,9 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
return -ENOMEM;
}
- /* copy struct dma_device */
+ new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
+
+ /* reference struct dma_device */
new_sh_chan->common.device = &shdev->common;
new_sh_chan->dev = shdev->common.dev;
@@ -1144,6 +1213,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
platform_set_drvdata(pdev, shdev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 5ae9fc5..2b55a27 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -23,6 +23,12 @@
struct device;
+enum dmae_pm_state {
+ DMAE_PM_ESTABLISHED,
+ DMAE_PM_BUSY,
+ DMAE_PM_PENDING,
+};
+
struct sh_dmae_chan {
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
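The new pm_state field ties the shdma.c changes together: a channel idles in DMAE_PM_ESTABLISHED; the first tx_submit() on an empty queue sets DMAE_PM_BUSY while the asynchronous runtime resume completes; an issue_pending() arriving in that window records DMAE_PM_PENDING so that sh_chan_xfer_ld_queue() runs as soon as the channel is re-established.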
@@ -38,6 +44,7 @@ struct sh_dmae_chan {
u32 __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
+ enum dmae_pm_state pm_state;
};
struct sh_dmae_device {
@@ -47,10 +54,14 @@ struct sh_dmae_device {
struct list_head node;
u32 __iomem *chan_reg;
u16 __iomem *dmars;
+ unsigned int chcr_offset;
+ u32 chcr_ie_bit;
};
#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
+#define to_sh_dev(chan) container_of(chan->common.device,\
+ struct sh_dmae_device, common)
#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 8f222d4..4b27c54 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -6,13 +6,16 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/amba/bus.h>
#include <plat/ste_dma40.h>
@@ -44,9 +47,6 @@
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_LOG_FREE 0
-/* Hardware designer of the block */
-#define D40_HW_DESIGNER 0x8
-
/**
* enum d40_command - The different commands and/or statuses.
*
@@ -175,8 +175,10 @@ struct d40_base;
* @tasklet: Tasklet that gets scheduled from interrupt context to complete a
* transfer and call client callback.
* @client: Client-owned descriptor list.
+ * @pending_queue: Submitted jobs, to be issued by issue_pending()
* @active: Active descriptor.
* @queue: Queued jobs.
+ * @prepare_queue: Prepared jobs.
* @dma_cfg: The client configuration of this dma channel.
* @configured: whether the dma_cfg configuration is valid
* @base: Pointer to the device instance struct.
@@ -185,6 +187,8 @@ struct d40_base;
* @log_def: Default logical channel settings.
* @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
+ * @runtime_addr: runtime configured address.
+ * @runtime_direction: runtime configured direction.
*
* This struct can either "be" a logical or a physical channel.
*/
@@ -199,8 +203,10 @@ struct d40_chan {
struct dma_chan chan;
struct tasklet_struct tasklet;
struct list_head client;
+ struct list_head pending_queue;
struct list_head active;
struct list_head queue;
+ struct list_head prepare_queue;
struct stedma40_chan_cfg dma_cfg;
bool configured;
struct d40_base *base;
@@ -475,7 +481,6 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
list_for_each_entry_safe(d, _d, &d40c->client, node)
if (async_tx_test_ack(&d->txd)) {
- d40_pool_lli_free(d40c, d);
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
@@ -642,9 +647,25 @@ static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
return d;
}
+/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
- list_add_tail(&desc->node, &d40c->queue);
+ d40_desc_remove(desc);
+ desc->is_in_client_list = false;
+ list_add_tail(&desc->node, &d40c->pending_queue);
+}
+
+static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
+{
+ struct d40_desc *d;
+
+ if (list_empty(&d40c->pending_queue))
+ return NULL;
+
+ d = list_first_entry(&d40c->pending_queue,
+ struct d40_desc,
+ node);
+ return d;
}
static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
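With this split, tx_submit() only moves a descriptor onto pending_queue; nothing reaches the hardware until the client calls dma_async_issue_pending(), at which point d40_issue_pending() splices pending_queue onto queue and starts it. A typical client sequence (scatterlist setup omitted):

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						 DMA_TO_DEVICE,
						 DMA_PREP_INTERRUPT);
	cookie = dmaengine_submit(txd);		/* -> d40c->pending_queue */
	dma_async_issue_pending(chan);		/* splice to queue and start */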
@@ -788,6 +809,7 @@ done:
static void d40_term_all(struct d40_chan *d40c)
{
struct d40_desc *d40d;
+ struct d40_desc *_d;
/* Release active descriptors */
while ((d40d = d40_first_active_get(d40c))) {
@@ -801,6 +823,26 @@ static void d40_term_all(struct d40_chan *d40c)
d40_desc_free(d40c, d40d);
}
+ /* Release pending descriptors */
+ while ((d40d = d40_first_pending(d40c))) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release client owned descriptors */
+ if (!list_empty(&d40c->client))
+ list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
+
+ /* Release descriptors in prepare queue */
+ if (!list_empty(&d40c->prepare_queue))
+ list_for_each_entry_safe(d40d, _d,
+ &d40c->prepare_queue, node) {
+ d40_desc_remove(d40d);
+ d40_desc_free(d40c, d40d);
+ }
d40c->pending_tx = 0;
d40c->busy = false;
@@ -1160,6 +1202,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;
@@ -1183,12 +1226,12 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
if (!d40d->cyclic) {
if (async_tx_test_ack(&d40d->txd)) {
- d40_pool_lli_free(d40c, d40d);
d40_desc_remove(d40d);
d40_desc_free(d40c, d40d);
} else {
@@ -1208,7 +1251,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);
return;
@@ -1575,21 +1618,10 @@ static int d40_free_dma(struct d40_chan *d40c)
u32 event;
struct d40_phy_res *phy = d40c->phy_chan;
bool is_src;
- struct d40_desc *d;
- struct d40_desc *_d;
-
/* Terminate all queued and active transfers */
d40_term_all(d40c);
- /* Release client owned descriptors */
- if (!list_empty(&d40c->client))
- list_for_each_entry_safe(d, _d, &d40c->client, node) {
- d40_pool_lli_free(d40c, d);
- d40_desc_remove(d);
- d40_desc_free(d40c, d);
- }
-
if (phy == NULL) {
chan_err(d40c, "phy == null\n");
return -EINVAL;
@@ -1891,6 +1923,12 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
goto err;
}
+ /*
+ * Add the descriptor to the prepare queue so that it can be
+ * freed later in terminate_all.
+ */
+ list_add_tail(&desc->node, &chan->prepare_queue);
+
spin_unlock_irqrestore(&chan->lock, flags);
return &desc->txd;
@@ -2091,7 +2129,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
struct scatterlist *sg;
int i;
- sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL);
+ sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
for (i = 0; i < periods; i++) {
sg_dma_address(&sg[i]) = dma_addr;
sg_dma_len(&sg[i]) = period_len;
@@ -2151,24 +2189,87 @@ static void d40_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&d40c->lock, flags);
- /* Busy means that pending jobs are already being processed */
+ list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
+
+ /* Busy means that queued jobs are already being processed */
if (!d40c->busy)
(void) d40_queue_start(d40c);
spin_unlock_irqrestore(&d40c->lock, flags);
}
+static int
+dma40_config_to_halfchannel(struct d40_chan *d40c,
+ struct stedma40_half_channel_info *info,
+ enum dma_slave_buswidth width,
+ u32 maxburst)
+{
+ enum stedma40_periph_data_width addr_width;
+ int psize;
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ addr_width = STEDMA40_BYTE_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ addr_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ addr_width = STEDMA40_WORD_WIDTH;
+ break;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ addr_width = STEDMA40_DOUBLEWORD_WIDTH;
+ break;
+ default:
+ dev_err(d40c->base->dev,
+ "illegal peripheral address width "
+ "requested (%d)\n",
+ width);
+ return -EINVAL;
+ }
+
+ if (chan_is_logical(d40c)) {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_LOG_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_LOG_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_LOG_4;
+ else
+ psize = STEDMA40_PSIZE_LOG_1;
+ } else {
+ if (maxburst >= 16)
+ psize = STEDMA40_PSIZE_PHY_16;
+ else if (maxburst >= 8)
+ psize = STEDMA40_PSIZE_PHY_8;
+ else if (maxburst >= 4)
+ psize = STEDMA40_PSIZE_PHY_4;
+ else
+ psize = STEDMA40_PSIZE_PHY_1;
+ }
+
+ info->data_width = addr_width;
+ info->psize = psize;
+ info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+
+ return 0;
+}
+
/* Runtime reconfiguration extension */
-static void d40_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
+static int d40_set_runtime_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
{
struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
- enum dma_slave_buswidth config_addr_width;
+ enum dma_slave_buswidth src_addr_width, dst_addr_width;
dma_addr_t config_addr;
- u32 config_maxburst;
- enum stedma40_periph_data_width addr_width;
- int psize;
+ u32 src_maxburst, dst_maxburst;
+ int ret;
+
+ src_addr_width = config->src_addr_width;
+ src_maxburst = config->src_maxburst;
+ dst_addr_width = config->dst_addr_width;
+ dst_maxburst = config->dst_maxburst;
if (config->direction == DMA_FROM_DEVICE) {
dma_addr_t dev_addr_rx =
@@ -2187,8 +2288,11 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_PERIPH_TO_MEM;
- config_addr_width = config->src_addr_width;
- config_maxburst = config->src_maxburst;
+ /* Configure the memory side */
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = src_addr_width;
+ if (dst_maxburst == 0)
+ dst_maxburst = src_maxburst;
} else if (config->direction == DMA_TO_DEVICE) {
dma_addr_t dev_addr_tx =
@@ -2207,68 +2311,39 @@ static void d40_set_runtime_config(struct dma_chan *chan,
cfg->dir);
cfg->dir = STEDMA40_MEM_TO_PERIPH;
- config_addr_width = config->dst_addr_width;
- config_maxburst = config->dst_maxburst;
-
+ /* Configure the memory side */
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = dst_addr_width;
+ if (src_maxburst == 0)
+ src_maxburst = dst_maxburst;
} else {
dev_err(d40c->base->dev,
"unrecognized channel direction %d\n",
config->direction);
- return;
+ return -EINVAL;
}
- switch (config_addr_width) {
- case DMA_SLAVE_BUSWIDTH_1_BYTE:
- addr_width = STEDMA40_BYTE_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_2_BYTES:
- addr_width = STEDMA40_HALFWORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_4_BYTES:
- addr_width = STEDMA40_WORD_WIDTH;
- break;
- case DMA_SLAVE_BUSWIDTH_8_BYTES:
- addr_width = STEDMA40_DOUBLEWORD_WIDTH;
- break;
- default:
+ if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
dev_err(d40c->base->dev,
- "illegal peripheral address width "
- "requested (%d)\n",
- config->src_addr_width);
- return;
+ "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
+ src_maxburst,
+ src_addr_width,
+ dst_maxburst,
+ dst_addr_width);
+ return -EINVAL;
}
- if (chan_is_logical(d40c)) {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_LOG_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_LOG_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_LOG_4;
- else
- psize = STEDMA40_PSIZE_LOG_1;
- } else {
- if (config_maxburst >= 16)
- psize = STEDMA40_PSIZE_PHY_16;
- else if (config_maxburst >= 8)
- psize = STEDMA40_PSIZE_PHY_8;
- else if (config_maxburst >= 4)
- psize = STEDMA40_PSIZE_PHY_4;
- else if (config_maxburst >= 2)
- psize = STEDMA40_PSIZE_PHY_2;
- else
- psize = STEDMA40_PSIZE_PHY_1;
- }
+ ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
+ src_addr_width,
+ src_maxburst);
+ if (ret)
+ return ret;
- /* Set up all the endpoint configs */
- cfg->src_info.data_width = addr_width;
- cfg->src_info.psize = psize;
- cfg->src_info.big_endian = false;
- cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
- cfg->dst_info.data_width = addr_width;
- cfg->dst_info.psize = psize;
- cfg->dst_info.big_endian = false;
- cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL;
+ ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
+ dst_addr_width,
+ dst_maxburst);
+ if (ret)
+ return ret;
/* Fill in register values */
if (chan_is_logical(d40c))
@@ -2281,12 +2356,14 @@ static void d40_set_runtime_config(struct dma_chan *chan,
d40c->runtime_addr = config_addr;
d40c->runtime_direction = config->direction;
dev_dbg(d40c->base->dev,
- "configured channel %s for %s, data width %d, "
- "maxburst %d bytes, LE, no flow control\n",
+ "configured channel %s for %s, data width %d/%d, "
+ "maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
(config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
- config_addr_width,
- config_maxburst);
+ src_addr_width, dst_addr_width,
+ src_maxburst, dst_maxburst);
+
+ return 0;
}
static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -2307,9 +2384,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
case DMA_RESUME:
return d40_resume(d40c);
case DMA_SLAVE_CONFIG:
- d40_set_runtime_config(chan,
+ return d40_set_runtime_config(chan,
(struct dma_slave_config *) arg);
- return 0;
default:
break;
}
@@ -2340,7 +2416,9 @@ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
INIT_LIST_HEAD(&d40c->active);
INIT_LIST_HEAD(&d40c->queue);
+ INIT_LIST_HEAD(&d40c->pending_queue);
INIT_LIST_HEAD(&d40c->client);
+ INIT_LIST_HEAD(&d40c->prepare_queue);
tasklet_init(&d40c->tasklet, dma_tasklet,
(unsigned long) d40c);
@@ -2501,25 +2579,6 @@ static int __init d40_phy_res_init(struct d40_base *base)
static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
{
- static const struct d40_reg_val dma_id_regs[] = {
- /* Peripheral Id */
- { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
- { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
- /*
- * D40_DREG_PERIPHID2 Depends on HW revision:
- * DB8500ed has 0x0008,
- * ? has 0x0018,
- * DB8500v1 has 0x0028
- * DB8500v2 has 0x0038
- */
- { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
-
- /* PCell Id */
- { .reg = D40_DREG_CELLID0, .val = 0x000d},
- { .reg = D40_DREG_CELLID1, .val = 0x00f0},
- { .reg = D40_DREG_CELLID2, .val = 0x0005},
- { .reg = D40_DREG_CELLID3, .val = 0x00b1}
- };
struct stedma40_platform_data *plat_data;
struct clk *clk = NULL;
void __iomem *virtbase = NULL;
@@ -2528,8 +2587,9 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
int num_log_chans = 0;
int num_phy_chans;
int i;
- u32 val;
- u32 rev;
+ u32 pid;
+ u32 cid;
+ u8 rev;
clk = clk_get(&pdev->dev, NULL);
@@ -2553,32 +2613,32 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
if (!virtbase)
goto failure;
- /* HW version check */
- for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
- if (dma_id_regs[i].val !=
- readl(virtbase + dma_id_regs[i].reg)) {
- d40_err(&pdev->dev,
- "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
- dma_id_regs[i].val,
- dma_id_regs[i].reg,
- readl(virtbase + dma_id_regs[i].reg));
- goto failure;
- }
- }
-
- /* Get silicon revision and designer */
- val = readl(virtbase + D40_DREG_PERIPHID2);
+ /* This is just a regular AMBA PrimeCell ID actually */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
+ & 255) << (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
+ & 255) << (i * 8);
- if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) !=
- D40_HW_DESIGNER) {
+ if (cid != AMBA_CID) {
+ d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
+ goto failure;
+ }
+ if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
- val & D40_DREG_PERIPHID2_DESIGNER_MASK,
- D40_HW_DESIGNER);
+ AMBA_MANF_BITS(pid),
+ AMBA_VENDOR_ST);
goto failure;
}
-
- rev = (val & D40_DREG_PERIPHID2_REV_MASK) >>
- D40_DREG_PERIPHID2_REV_POS;
+ /*
+ * HW revision:
+ * DB8500ed has revision 0
+ * ? has revision 1
+ * DB8500v1 has revision 2
+ * DB8500v2 has revision 3
+ */
+ rev = AMBA_REV_BITS(pid);
/* The number of physical channels on this HW */
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
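The ID probe above assumes the standard AMBA PrimeCell layout: the last 32 bytes of the register window hold eight byte-wide ID registers, PeriphID0..3 at base + size - 0x20 and CellID0..3 at base + size - 0x10, assembled LSB first. The helpers used to decode the assembled words come from include/linux/amba/bus.h:

	#define AMBA_CID		0xb105f00d
	#define AMBA_REV_BITS(a)	(((a) >> 20) & 0x0f)	/* revision */
	#define AMBA_MANF_BITS(a)	(((a) >> 12) & 0xff)	/* designer */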
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index 195ee65..b44c4551 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -184,9 +184,6 @@
#define D40_DREG_PERIPHID0 0xFE0
#define D40_DREG_PERIPHID1 0xFE4
#define D40_DREG_PERIPHID2 0xFE8
-#define D40_DREG_PERIPHID2_REV_POS 4
-#define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
-#define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
#define D40_DREG_PERIPHID3 0xFEC
#define D40_DREG_CELLID0 0xFF0
#define D40_DREG_CELLID1 0xFF4
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index f69f90a..a4a398f 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -753,7 +753,7 @@ static int __devinit td_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&td->dma.channels);
- for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) {
+ for (i = 0; i < pdata->nr_channels; i++) {
struct timb_dma_chan *td_chan = &td->channels[i];
struct timb_dma_platform_data_channel *pchan =
pdata->channels + i;
@@ -762,12 +762,11 @@ static int __devinit td_probe(struct platform_device *pdev)
if ((i % 2) == pchan->rx) {
dev_err(&pdev->dev, "Wrong channel configuration\n");
err = -EINVAL;
- goto err_tasklet_kill;
+ goto err_free_irq;
}
td_chan->chan.device = &td->dma;
td_chan->chan.cookie = 1;
- td_chan->chan.chan_id = i;
spin_lock_init(&td_chan->lock);
INIT_LIST_HEAD(&td_chan->active_list);
INIT_LIST_HEAD(&td_chan->queue);
diff --git a/drivers/eisa/pci_eisa.c b/drivers/eisa/pci_eisa.c
index 0dd0f63..d4cd56a 100644
--- a/drivers/eisa/pci_eisa.c
+++ b/drivers/eisa/pci_eisa.c
@@ -19,8 +19,7 @@
/* There is only *one* pci_eisa device per machine, right ? */
static struct eisa_root_device pci_eisa_root;
-static int __init pci_eisa_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __init pci_eisa_init(struct pci_dev *pdev)
{
int rc;
@@ -45,22 +44,26 @@ static int __init pci_eisa_init(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id pci_eisa_pci_tbl[] = {
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
- { 0, }
-};
+/*
+ * pci_eisa_init_early() must be called before pnpacpi_init()/isapnp_init();
+ * otherwise the PNP resources get enabled early and can prevent the EISA
+ * bus from being initialized. It must also run after x86's
+ * pci_subsys_init(), hence the use of subsys_initcall_sync().
+ */
+static int __init pci_eisa_init_early(void)
+{
+ struct pci_dev *dev = NULL;
+ int ret;
-static struct pci_driver pci_eisa_driver = {
- .name = "pci_eisa",
- .id_table = pci_eisa_pci_tbl,
- .probe = pci_eisa_init,
-};
+ for_each_pci_dev(dev)
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) {
+ ret = pci_eisa_init(dev);
+ if (ret)
+ return ret;
+ }
-static int __init pci_eisa_init_module (void)
-{
- return pci_register_driver (&pci_eisa_driver);
+ return 0;
}
-
-device_initcall(pci_eisa_init_module);
-MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
+subsys_initcall_sync(pci_eisa_init_early);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index 29d2423..85661b0 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -32,7 +32,7 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index b97d4f0..ee96b91 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1605,8 +1605,7 @@ static int dispatch_ioctl(struct client *client,
_IOC_SIZE(cmd) > sizeof(buffer))
return -ENOTTY;
- if (_IOC_DIR(cmd) == _IOC_READ)
- memset(&buffer, 0, _IOC_SIZE(cmd));
+ memset(&buffer, 0, sizeof(buffer));
if (_IOC_DIR(cmd) & _IOC_WRITE)
if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 812cea3..1f3dd51 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -38,7 +38,7 @@
#include <linux/string.h>
#include <linux/workqueue.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 57c3973..0f90e00 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/byteorder.h>
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 193ed92..94d3b49 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 334b82a..855ab3f 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1046,8 +1046,8 @@ static void update_split_timeout(struct fw_card *card)
cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
- cycles = max(cycles, 800u); /* minimum as per the spec */
- cycles = min(cycles, 3u * 8000u); /* maximum OHCI timeout */
+ /* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
+ cycles = clamp(cycles, 800u, 3u * 8000u);
card->split_timeout_cycles = cycles;
card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 0fe4e4e..b45be57 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/types.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
struct device;
struct fw_card;
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index e74750b..7c869b7 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -7,6 +7,7 @@
*/
#include <linux/bug.h>
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ethtool.h>
@@ -73,7 +74,7 @@ struct rfc2734_arp {
__be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
__be32 sip; /* Sender's IP Address */
__be32 tip; /* IP Address of requested hw addr */
-} __attribute__((packed));
+} __packed;
/* This header format is specific to this driver implementation. */
#define FWNET_ALEN 8
@@ -81,7 +82,7 @@ struct rfc2734_arp {
struct fwnet_header {
u8 h_dest[FWNET_ALEN]; /* destination address */
__be16 h_proto; /* packet type ID field */
-} __attribute__((packed));
+} __packed;
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
@@ -261,16 +262,16 @@ static int fwnet_header_rebuild(struct sk_buff *skb)
}
static int fwnet_header_cache(const struct neighbour *neigh,
- struct hh_cache *hh)
+ struct hh_cache *hh, __be16 type)
{
struct net_device *net;
struct fwnet_header *h;
- if (hh->hh_type == cpu_to_be16(ETH_P_802_3))
+ if (type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
h = (struct fwnet_header *)((u8 *)hh->hh_data + 16 - sizeof(*h));
- h->h_proto = hh->hh_type;
+ h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
hh->hh_len = FWNET_HLEN;
@@ -501,11 +502,7 @@ static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
max_rec = min(max_rec, speed + 8);
- max_rec = min(max_rec, 0xbU); /* <= 4096 */
- if (max_rec < 8) {
- fw_notify("max_rec %x out of range\n", max_rec);
- max_rec = 8;
- }
+ max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */
return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}
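Worked example: at S400 (speed == 2) an advertised max_rec of 11 is first limited to min(11, 2 + 8) == 10; the clamp leaves that untouched, and the payload becomes (1 << 11) - RFC2374_FRAG_HDR_SIZE == 2048 - 8 == 2040 bytes. Only values below 8 or above 11 are actually clamped, which replaces the old out-of-range warning path.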
@@ -1129,17 +1126,12 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
unsigned u;
if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
- /* outside OHCI posted write area? */
- static const struct fw_address_region region = {
- .start = 0xffff00000000ULL,
- .end = CSR_REGISTER_BASE,
- };
-
dev->handler.length = 4096;
dev->handler.address_callback = fwnet_receive_packet;
dev->handler.callback_data = dev;
- retval = fw_core_add_address_handler(&dev->handler, &region);
+ retval = fw_core_add_address_handler(&dev->handler,
+ &fw_high_memory_region);
if (retval < 0)
goto failed_initial;
diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
index 0618145..763626b 100644
--- a/drivers/firewire/nosy.c
+++ b/drivers/firewire/nosy.c
@@ -37,7 +37,7 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "nosy.h"
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 271fc51..0a0225a 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -42,6 +42,7 @@
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/page.h>
@@ -125,6 +126,7 @@ struct context {
struct fw_ohci *ohci;
u32 regs;
int total_allocation;
+ u32 current_bus;
bool running;
bool flushing;
@@ -226,7 +228,7 @@ struct fw_ohci {
__le32 *self_id_cpu;
dma_addr_t self_id_bus;
- struct tasklet_struct bus_reset_tasklet;
+ struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
@@ -253,7 +255,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
#define OHCI1394_REGISTER_SIZE 0x800
-#define OHCI_LOOP_COUNT 500
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
#define OHCI_TCODE_PHY_PACKET 0x0e
@@ -265,6 +266,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
+#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define QUIRK_CYCLE_TIMER 1
@@ -272,6 +275,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_BE_HEADERS 4
#define QUIRK_NO_1394A 8
#define QUIRK_NO_MSI 16
+#define QUIRK_TI_SLLZ059 32
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -304,6 +308,12 @@ static const struct {
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
+ QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
+
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
+ QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
+
{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -320,6 +330,7 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
+ ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -521,6 +532,12 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
+/*
+ * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
+ * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
+ * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
+ * directly. Exceptions are intrinsically serialized contexts like pci_probe.
+ */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
u32 val;
@@ -529,6 +546,9 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
+
if (val & OHCI1394_PhyControl_ReadDone)
return OHCI1394_PhyControl_ReadData(val);
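The added !~val tests rely on the fact that reads from a PCI device that has been surprise-removed return all ones: val == 0xffffffff makes ~val zero, so the PHY accessors bail out with -ENODEV instead of polling for a completion bit that will never be set.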
@@ -552,6 +572,9 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
OHCI1394_PhyControl_Write(addr, val));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
+
if (!(val & OHCI1394_PhyControl_WritePending))
return 0;
@@ -637,7 +660,6 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
ctx->last_buffer_index = index;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ctx->ohci);
}
static void ar_context_release(struct ar_context *ctx)
@@ -853,7 +875,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_tasklet).
+ * at a slightly incorrect time (in bus_reset_work).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1009,7 +1031,6 @@ static void ar_context_run(struct ar_context *ctx)
reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
- flush_writes(ctx->ohci);
}
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
@@ -1041,6 +1062,7 @@ static void context_tasklet(unsigned long data)
address = le32_to_cpu(last->branch_address);
z = address & 0xf;
address &= ~0xf;
+ ctx->current_bus = address;
/* If the branch address points to a buffer outside of the
* current buffer, advance to the next buffer. */
@@ -1209,14 +1231,14 @@ static void context_stop(struct context *ctx)
reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
ctx->running = false;
- flush_writes(ctx->ohci);
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 1000; i++) {
reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
if ((reg & CONTEXT_ACTIVE) == 0)
return;
- mdelay(1);
+ if (i)
+ udelay(10);
}
fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
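/*
 * Timing note: 1000 * udelay(10) preserves the old ~10 ms worst-case
 * budget of 10 * mdelay(1) but polls at a far finer granularity, and
 * skipping the delay on the first pass (i == 0) lets an already
 * inactive context return immediately.
 */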
@@ -1353,12 +1375,10 @@ static int at_context_queue_packet(struct context *ctx,
context_append(ctx, d, z, 4 - z);
- if (ctx->running) {
+ if (ctx->running)
reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ohci);
- } else {
+ else
context_run(ctx, 0);
- }
return 0;
}
@@ -1710,9 +1730,94 @@ static u32 update_bus_time(struct fw_ohci *ohci)
return ohci->bus_time | cycle_time_seconds;
}
-static void bus_reset_tasklet(unsigned long data)
+static int get_status_for_port(struct fw_ohci *ohci, int port_index)
+{
+ int reg;
+
+ mutex_lock(&ohci->phy_reg_mutex);
+ reg = write_phy_reg(ohci, 7, port_index);
+ if (reg >= 0)
+ reg = read_phy_reg(ohci, 8);
+ mutex_unlock(&ohci->phy_reg_mutex);
+ if (reg < 0)
+ return reg;
+
+ switch (reg & 0x0f) {
+ case 0x06:
+ return 2; /* is child node (connected to parent node) */
+ case 0x0e:
+ return 3; /* is parent node (connected to child node) */
+ }
+ return 1; /* not connected */
+}
+
+static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
+ int self_id_count)
+{
+ int i;
+ u32 entry;
+
+ for (i = 0; i < self_id_count; i++) {
+ entry = ohci->self_id_buffer[i];
+ if ((self_id & 0xff000000) == (entry & 0xff000000))
+ return -1;
+ if ((self_id & 0xff000000) < (entry & 0xff000000))
+ return i;
+ }
+ return i;
+}
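/*
 * Context for the top-byte comparisons above: every self-ID packet #0
 * begins with the 0b10 packet identifier followed by the six-bit phy
 * ID, so sorting quadlets by bits 31..24 sorts them by phy ID, and an
 * equal top byte means this phy's self ID arrived after all, hence -1
 * (no insertion needed).
 */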
+
+/*
+ * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
+ * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
+ * Construct the selfID from phy register contents.
+ * FIXME: How to determine the selfID.i flag?
+ */
+static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
- struct fw_ohci *ohci = (struct fw_ohci *)data;
+ int reg, i, pos, status;
+ /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
+ u32 self_id = 0x8040c800;
+
+ reg = reg_read(ohci, OHCI1394_NodeID);
+ if (!(reg & OHCI1394_NodeID_idValid)) {
+ fw_notify("node ID not valid, new bus reset in progress\n");
+ return -EBUSY;
+ }
+ self_id |= ((reg & 0x3f) << 24); /* phy ID */
+
+ reg = ohci_read_phy_reg(&ohci->card, 4);
+ if (reg < 0)
+ return reg;
+ self_id |= ((reg & 0x07) << 8); /* power class */
+
+ reg = ohci_read_phy_reg(&ohci->card, 1);
+ if (reg < 0)
+ return reg;
+ self_id |= ((reg & 0x3f) << 16); /* gap count */
+
+ for (i = 0; i < 3; i++) {
+ status = get_status_for_port(ohci, i);
+ if (status < 0)
+ return status;
+ self_id |= ((status & 0x3) << (6 - (i * 2)));
+ }
+
+ pos = get_self_id_pos(ohci, self_id, self_id_count);
+ if (pos >= 0) {
+ memmove(&(ohci->self_id_buffer[pos+1]),
+ &(ohci->self_id_buffer[pos]),
+ (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
+ ohci->self_id_buffer[pos] = self_id;
+ self_id_count++;
+ }
+ return self_id_count;
+}
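/*
 * Hypothetical decode helpers for the quadlet constructed above; the
 * field positions mirror the shifts used in the function, and the base
 * value 0x8040c800 sets link active, speed code 3 and the contender bit:
 */
#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_LINK_ACTIVE(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_POWER_CLASS(q)		(((q) >>  8) & 0x07)
#define SELF_ID_PORT_STATUS(q, n)	(((q) >> (6 - 2 * (n))) & 0x03)
/* bit 1 is the selfID.i flag that the FIXME above cannot reconstruct */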
+
+static void bus_reset_work(struct work_struct *work)
+{
+ struct fw_ohci *ohci =
+ container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, i, j, reg;
int generation, new_generation;
unsigned long flags;
@@ -1750,21 +1855,50 @@ static void bus_reset_tasklet(unsigned long data)
* bit extra to get the actual number of self IDs.
*/
self_id_count = (reg >> 3) & 0xff;
- if (self_id_count == 0 || self_id_count > 252) {
+
+ if (self_id_count > 252) {
fw_notify("inconsistent self IDs\n");
return;
}
+
generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
- fw_notify("inconsistent self IDs\n");
- return;
+ /*
+ * If the invalid data looks like a cycle start packet,
+ * it's likely to be the result of the cycle master
+ * having a wrong gap count. In this case, the self IDs
+ * so far are valid and should be processed so that the
+ * bus manager can then correct the gap count.
+ */
+ if (cond_le32_to_cpu(ohci->self_id_cpu[i])
+ == 0xffff008f) {
+ fw_notify("ignoring spurious self IDs\n");
+ self_id_count = j;
+ break;
+ } else {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
}
ohci->self_id_buffer[j] =
cond_le32_to_cpu(ohci->self_id_cpu[i]);
}
+
+ if (ohci->quirks & QUIRK_TI_SLLZ059) {
+ self_id_count = find_and_insert_self_id(ohci, self_id_count);
+ if (self_id_count < 0) {
+ fw_notify("could not construct local self ID\n");
+ return;
+ }
+ }
+
+ if (self_id_count == 0) {
+ fw_notify("inconsistent self IDs\n");
+ return;
+ }
rmb();
/*
@@ -1884,7 +2018,7 @@ static irqreturn_t irq_handler(int irq, void *data)
log_irqs(event);
if (event & OHCI1394_selfIDComplete)
- tasklet_schedule(&ohci->bus_reset_tasklet);
+ queue_work(fw_workqueue, &ohci->bus_reset_work);
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
@@ -1931,7 +2065,8 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
- fw_error("PCI posted write error\n");
+ if (printk_ratelimit())
+ fw_error("PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
@@ -1967,14 +2102,18 @@ static irqreturn_t irq_handler(int irq, void *data)
static int software_reset(struct fw_ohci *ohci)
{
+ u32 val;
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+ for (i = 0; i < 500; i++) {
+ val = reg_read(ohci, OHCI1394_HCControlSet);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- if ((reg_read(ohci, OHCI1394_HCControlSet) &
- OHCI1394_HCControl_softReset) == 0)
+ if (!(val & OHCI1394_HCControl_softReset))
return 0;
+
msleep(1);
}
@@ -2041,6 +2180,28 @@ static int configure_1394a_enhancements(struct fw_ohci *ohci)
return 0;
}
+static int probe_tsb41ba3d(struct fw_ohci *ohci)
+{
+ /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
+ static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
+ int reg, i;
+
+ reg = read_phy_reg(ohci, 2);
+ if (reg < 0)
+ return reg;
+ if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
+ return 0;
+
+ for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
+ reg = read_paged_phy_reg(ohci, 1, i + 10);
+ if (reg < 0)
+ return reg;
+ if (reg != id[i])
+ return 0;
+ }
+ return 1;
+}
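/*
 * A sketch of the paged read used above, assuming IEEE 1394a extended
 * register paging where the page select field sits in bits 7..5 of PHY
 * register 7 (the in-tree read_paged_phy_reg() may differ in detail;
 * callers hold phy_reg_mutex or run from serialized contexts):
 */
static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret = write_phy_reg(ohci, 7, page << 5);	/* select page */

	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);	/* read within that page */
}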
+
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
@@ -2078,6 +2239,16 @@ static int ohci_enable(struct fw_card *card,
return -EIO;
}
+ if (ohci->quirks & QUIRK_TI_SLLZ059) {
+ ret = probe_tsb41ba3d(ohci);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ fw_notify("local TSB41BA3D phy\n");
+ else
+ ohci->quirks &= ~QUIRK_TI_SLLZ059;
+ }
+
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
@@ -2175,8 +2346,13 @@ static int ohci_enable(struct fw_card *card,
ohci_driver_name, ohci)) {
fw_error("Failed to allocate interrupt %d.\n", dev->irq);
pci_disable_msi(dev);
- dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
- ohci->config_rom, ohci->config_rom_bus);
+
+ if (config_rom) {
+ dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+ ohci->next_config_rom,
+ ohci->next_config_rom_bus);
+ ohci->next_config_rom = NULL;
+ }
return -EIO;
}
@@ -2204,7 +2380,9 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_LinkControl_rcvPhyPkt);
ar_context_run(&ohci->ar_request_ctx);
- ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */
+ ar_context_run(&ohci->ar_response_ctx);
+
+ flush_writes(ohci);
/* We are ready to go, reset bus to finish initialization. */
fw_schedule_bus_reset(&ohci->card, false, true);
@@ -2246,7 +2424,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_tasklet).
+ * ohci->next_config_rom to NULL (see bus_reset_work).
*/
next_config_rom =
@@ -2525,6 +2703,7 @@ static int handle_ir_packet_per_buffer(struct context *context,
struct iso_context *ctx =
container_of(context, struct iso_context, context);
struct descriptor *pd;
+ u32 buffer_dma;
__le32 *ir_header;
void *p;
@@ -2535,6 +2714,16 @@ static int handle_ir_packet_per_buffer(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
+ d++;
+ buffer_dma = le32_to_cpu(d->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(d->req_count),
+ DMA_FROM_DEVICE);
+ }
+
p = last + 1;
copy_iso_headers(ctx, p);
@@ -2557,11 +2746,19 @@ static int handle_ir_buffer_fill(struct context *context,
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
+ u32 buffer_dma;
if (last->res_count != 0)
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ buffer_dma = le32_to_cpu(last->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(last->req_count),
+ DMA_FROM_DEVICE);
+
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
ctx->base.callback.mc(&ctx->base,
le32_to_cpu(last->data_address) +
@@ -2571,6 +2768,43 @@ static int handle_ir_buffer_fill(struct context *context,
return 1;
}
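/*
 * The pattern behind all of the dma_sync_* calls added in this patch,
 * shown with hypothetical variables: payload pages are streaming DMA
 * mappings, so ownership is handed to the device before the transfer
 * and back to the CPU before the driver reads the data.
 *
 *	dma_sync_single_range_for_device(dev, page_bus, offset, len,
 *					 DMA_FROM_DEVICE);
 *	...hardware fills the buffer, completion interrupt fires...
 *	dma_sync_single_range_for_cpu(dev, page_bus, offset, len,
 *				      DMA_FROM_DEVICE);
 */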
+static inline void sync_it_packet_for_cpu(struct context *context,
+ struct descriptor *pd)
+{
+ __le16 control;
+ u32 buffer_dma;
+
+ /* only packets beginning with OUTPUT_MORE* have data buffers */
+ if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+ return;
+
+ /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
+ pd += 2;
+
+ /*
+ * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
+ * data buffer is in the context program's coherent page and must not
+ * be synced.
+ */
+ if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
+ (context->current_bus & PAGE_MASK)) {
+ if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
+ return;
+ pd++;
+ }
+
+ do {
+ buffer_dma = le32_to_cpu(pd->data_address);
+ dma_sync_single_range_for_cpu(context->ohci->card.device,
+ buffer_dma & PAGE_MASK,
+ buffer_dma & ~PAGE_MASK,
+ le16_to_cpu(pd->req_count),
+ DMA_TO_DEVICE);
+ control = pd->control;
+ pd++;
+ } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
+}
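/*
 * The page comparison above ties in with context_tasklet(), which now
 * records ctx->current_bus, the bus address of the descriptor page
 * being executed: a data_address falling into that page points at
 * immediate header data in coherent descriptor memory, not at a
 * streaming-mapped payload page, and so must not be synced.
 */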
+
static int handle_it_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -2587,6 +2821,8 @@ static int handle_it_packet(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
+ sync_it_packet_for_cpu(context, d);
+
i = ctx->header_length;
if (i + 4 < PAGE_SIZE) {
/* Present this value as big-endian to match the receive code */
@@ -2956,6 +3192,10 @@ static int queue_iso_transmit(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
pd[i].data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+ page_bus, offset, length,
+ DMA_TO_DEVICE);
+
payload_index += length;
}
@@ -2980,6 +3220,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
+ struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
@@ -3034,6 +3275,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
pd->data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(device, page_bus,
+ offset, length,
+ DMA_FROM_DEVICE);
+
offset = (offset + length) & ~PAGE_MASK;
rest -= length;
if (offset == 0)
@@ -3093,6 +3338,10 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
page_bus = page_private(buffer->pages[page]);
d->data_address = cpu_to_le32(page_bus + offset);
+ dma_sync_single_range_for_device(ctx->context.ohci->card.device,
+ page_bus, offset, length,
+ DMA_FROM_DEVICE);
+
rest -= length;
offset = 0;
page++;
@@ -3135,7 +3384,6 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base)
&container_of(base, struct iso_context, base)->context;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ctx->ohci);
}
static const struct fw_card_driver ohci_driver = {
@@ -3225,8 +3473,7 @@ static int __devinit pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- tasklet_init(&ohci->bus_reset_tasklet,
- bus_reset_tasklet, (unsigned long)ohci);
+ INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
err = pci_request_region(dev, 0, ohci_driver_name);
if (err) {
@@ -3368,6 +3615,7 @@ static void pci_remove(struct pci_dev *dev)
ohci = pci_get_drvdata(dev);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
+ cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
/*
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 17cef86..68375bc 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -154,12 +154,16 @@ struct sbp2_logical_unit {
bool blocked;
};
+static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
+{
+ queue_delayed_work(fw_workqueue, &lu->work, delay);
+}
+
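/*
 * With the kref scheme gone, lifetime safety rests on the workqueue
 * API alone: queue_delayed_work() does not double-queue an already
 * pending job, and sbp2_remove() below cancels each logical unit's
 * work with cancel_delayed_work_sync() before tearing the target down.
 */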
/*
* We create one struct sbp2_target per IEEE 1212 Unit Directory
* and one struct Scsi_Host per sbp2_target.
*/
struct sbp2_target {
- struct kref kref;
struct fw_unit *unit;
const char *bus_id;
struct list_head lu_list;
@@ -772,71 +776,6 @@ static int sbp2_lun2int(u16 lun)
return scsilun_to_int(&eight_bytes_lun);
}
-static void sbp2_release_target(struct kref *kref)
-{
- struct sbp2_target *tgt = container_of(kref, struct sbp2_target, kref);
- struct sbp2_logical_unit *lu, *next;
- struct Scsi_Host *shost =
- container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
- struct scsi_device *sdev;
- struct fw_device *device = target_device(tgt);
-
- /* prevent deadlocks */
- sbp2_unblock(tgt);
-
- list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
- sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
- if (sdev) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- }
- if (lu->login_id != INVALID_LOGIN_ID) {
- int generation, node_id;
- /*
- * tgt->node_id may be obsolete here if we failed
- * during initial login or after a bus reset where
- * the topology changed.
- */
- generation = device->generation;
- smp_rmb(); /* node_id vs. generation */
- node_id = device->node_id;
- sbp2_send_management_orb(lu, node_id, generation,
- SBP2_LOGOUT_REQUEST,
- lu->login_id, NULL);
- }
- fw_core_remove_address_handler(&lu->address_handler);
- list_del(&lu->link);
- kfree(lu);
- }
- scsi_remove_host(shost);
- fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
-
- fw_unit_put(tgt->unit);
- scsi_host_put(shost);
- fw_device_put(device);
-}
-
-static void sbp2_target_get(struct sbp2_target *tgt)
-{
- kref_get(&tgt->kref);
-}
-
-static void sbp2_target_put(struct sbp2_target *tgt)
-{
- kref_put(&tgt->kref, sbp2_release_target);
-}
-
-/*
- * Always get the target's kref when scheduling work on one its units.
- * Each workqueue job is responsible to call sbp2_target_put() upon return.
- */
-static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
-{
- sbp2_target_get(lu->tgt);
- if (!queue_delayed_work(fw_workqueue, &lu->work, delay))
- sbp2_target_put(lu->tgt);
-}
-
/*
* Write retransmit retry values into the BUSY_TIMEOUT register.
* - The single-phase retry protocol is supported by all SBP-2 devices, but the
@@ -877,7 +816,7 @@ static void sbp2_login(struct work_struct *work)
int generation, node_id, local_node_id;
if (fw_device_is_shutdown(device))
- goto out;
+ return;
generation = device->generation;
smp_rmb(); /* node IDs must not be older than generation */
@@ -899,7 +838,7 @@ static void sbp2_login(struct work_struct *work)
/* Let any waiting I/O fail from now on. */
sbp2_unblock(lu->tgt);
}
- goto out;
+ return;
}
tgt->node_id = node_id;
@@ -925,7 +864,8 @@ static void sbp2_login(struct work_struct *work)
if (lu->has_sdev) {
sbp2_cancel_orbs(lu);
sbp2_conditionally_unblock(lu);
- goto out;
+
+ return;
}
if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
@@ -957,7 +897,8 @@ static void sbp2_login(struct work_struct *work)
lu->has_sdev = true;
scsi_device_put(sdev);
sbp2_allow_block(lu);
- goto out;
+
+ return;
out_logout_login:
smp_rmb(); /* generation may have changed */
@@ -971,8 +912,57 @@ static void sbp2_login(struct work_struct *work)
* lu->work already. Reset the work from reconnect to login.
*/
PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- out:
- sbp2_target_put(tgt);
+}
+
+static void sbp2_reconnect(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu =
+ container_of(work, struct sbp2_logical_unit, work.work);
+ struct sbp2_target *tgt = lu->tgt;
+ struct fw_device *device = target_device(tgt);
+ int generation, node_id, local_node_id;
+
+ if (fw_device_is_shutdown(device))
+ return;
+
+ generation = device->generation;
+ smp_rmb(); /* node IDs must not be older than generation */
+ node_id = device->node_id;
+ local_node_id = device->card->node_id;
+
+ if (sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_RECONNECT_REQUEST,
+ lu->login_id, NULL) < 0) {
+ /*
+ * If reconnect was impossible even though we are in the
+ * current generation, fall back and try to log in again.
+ *
+	 * We could check for "Function rejected" status, but
+	 * looking at the bus generation is simpler and more general.
+ */
+ smp_rmb(); /* get current card generation */
+ if (generation == device->card->generation ||
+ lu->retries++ >= 5) {
+ fw_error("%s: failed to reconnect\n", tgt->bus_id);
+ lu->retries = 0;
+ PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ }
+ sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
+ return;
+ }
+
+ tgt->node_id = node_id;
+ tgt->address_high = local_node_id << 16;
+ smp_wmb(); /* node IDs must not be older than generation */
+ lu->generation = generation;
+
+ fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
+ tgt->bus_id, lu->lun, lu->retries);
+
+ sbp2_agent_reset(lu);
+ sbp2_cancel_orbs(lu);
+ sbp2_conditionally_unblock(lu);
}
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
@@ -1120,6 +1110,7 @@ static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
}
static struct scsi_host_template scsi_driver_template;
+static int sbp2_remove(struct device *dev);
static int sbp2_probe(struct device *dev)
{
@@ -1141,7 +1132,6 @@ static int sbp2_probe(struct device *dev)
tgt = (struct sbp2_target *)shost->hostdata;
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
- kref_init(&tgt->kref);
INIT_LIST_HEAD(&tgt->lu_list);
tgt->bus_id = dev_name(&unit->device);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
@@ -1154,9 +1144,6 @@ static int sbp2_probe(struct device *dev)
if (scsi_add_host(shost, &unit->device) < 0)
goto fail_shost_put;
- fw_device_get(device);
- fw_unit_get(unit);
-
/* implicit directory ID */
tgt->directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
@@ -1166,7 +1153,7 @@ static int sbp2_probe(struct device *dev)
if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
&firmware_revision) < 0)
- goto fail_tgt_put;
+ goto fail_remove;
sbp2_clamp_management_orb_timeout(tgt);
sbp2_init_workarounds(tgt, model, firmware_revision);
@@ -1177,16 +1164,17 @@ static int sbp2_probe(struct device *dev)
* specifies the max payload size as 2 ^ (max_payload + 2), so
* if we set this to max_speed + 7, we get the right value.
*/
- tgt->max_payload = min(device->max_speed + 7, 10U);
- tgt->max_payload = min(tgt->max_payload, device->card->max_receive - 1);
+ tgt->max_payload = min3(device->max_speed + 7, 10U,
+ device->card->max_receive - 1);
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
+
return 0;
- fail_tgt_put:
- sbp2_target_put(tgt);
+ fail_remove:
+ sbp2_remove(dev);
return -ENOMEM;
fail_shost_put:
@@ -1194,71 +1182,6 @@ static int sbp2_probe(struct device *dev)
return -ENOMEM;
}
-static int sbp2_remove(struct device *dev)
-{
- struct fw_unit *unit = fw_unit(dev);
- struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
- struct sbp2_logical_unit *lu;
-
- list_for_each_entry(lu, &tgt->lu_list, link)
- cancel_delayed_work_sync(&lu->work);
-
- sbp2_target_put(tgt);
- return 0;
-}
-
-static void sbp2_reconnect(struct work_struct *work)
-{
- struct sbp2_logical_unit *lu =
- container_of(work, struct sbp2_logical_unit, work.work);
- struct sbp2_target *tgt = lu->tgt;
- struct fw_device *device = target_device(tgt);
- int generation, node_id, local_node_id;
-
- if (fw_device_is_shutdown(device))
- goto out;
-
- generation = device->generation;
- smp_rmb(); /* node IDs must not be older than generation */
- node_id = device->node_id;
- local_node_id = device->card->node_id;
-
- if (sbp2_send_management_orb(lu, node_id, generation,
- SBP2_RECONNECT_REQUEST,
- lu->login_id, NULL) < 0) {
- /*
- * If reconnect was impossible even though we are in the
- * current generation, fall back and try to log in again.
- *
- * We could check for "Function rejected" status, but
- * looking at the bus generation as simpler and more general.
- */
- smp_rmb(); /* get current card generation */
- if (generation == device->card->generation ||
- lu->retries++ >= 5) {
- fw_error("%s: failed to reconnect\n", tgt->bus_id);
- lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
- }
- sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
- goto out;
- }
-
- tgt->node_id = node_id;
- tgt->address_high = local_node_id << 16;
- smp_wmb(); /* node IDs must not be older than generation */
- lu->generation = generation;
-
- fw_notify("%s: reconnected to LUN %04x (%d retries)\n",
- tgt->bus_id, lu->lun, lu->retries);
-
- sbp2_agent_reset(lu);
- sbp2_cancel_orbs(lu);
- sbp2_conditionally_unblock(lu);
- out:
- sbp2_target_put(tgt);
-}
-
static void sbp2_update(struct fw_unit *unit)
{
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
@@ -1277,6 +1200,51 @@ static void sbp2_update(struct fw_unit *unit)
}
}
+static int sbp2_remove(struct device *dev)
+{
+ struct fw_unit *unit = fw_unit(dev);
+ struct fw_device *device = fw_parent_device(unit);
+ struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
+ struct sbp2_logical_unit *lu, *next;
+ struct Scsi_Host *shost =
+ container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
+ struct scsi_device *sdev;
+
+ /* prevent deadlocks */
+ sbp2_unblock(tgt);
+
+ list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
+ cancel_delayed_work_sync(&lu->work);
+ sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ if (lu->login_id != INVALID_LOGIN_ID) {
+ int generation, node_id;
+ /*
+ * tgt->node_id may be obsolete here if we failed
+ * during initial login or after a bus reset where
+ * the topology changed.
+ */
+ generation = device->generation;
+ smp_rmb(); /* node_id vs. generation */
+ node_id = device->node_id;
+ sbp2_send_management_orb(lu, node_id, generation,
+ SBP2_LOGOUT_REQUEST,
+ lu->login_id, NULL);
+ }
+ fw_core_remove_address_handler(&lu->address_handler);
+ list_del(&lu->link);
+ kfree(lu);
+ }
+ scsi_remove_host(shost);
+ fw_notify("released %s, target %d:0:0\n", tgt->bus_id, shost->host_no);
+
+ scsi_host_put(shost);
+ return 0;
+}
+
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483